| author | Mitja Felicijan <mitja.felicijan@gmail.com> | 2026-01-21 22:52:54 +0100 |
|---|---|---|
| committer | Mitja Felicijan <mitja.felicijan@gmail.com> | 2026-01-21 22:52:54 +0100 |
| commit | dcacc00e3750300617ba6e16eb346713f91a783a (patch) | |
| tree | 38e2d4fb5ed9d119711d4295c6eda4b014af73fd /examples/redis-unstable/deps/jemalloc/include | |
| parent | 58dac10aeb8f5a041c46bddbeaf4c7966a99b998 (diff) | |
| download | crep-dcacc00e3750300617ba6e16eb346713f91a783a.tar.gz | |
Remove testing data
Diffstat (limited to 'examples/redis-unstable/deps/jemalloc/include')
137 files changed, 0 insertions, 21342 deletions
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/activity_callback.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/activity_callback.h deleted file mode 100644 index 6c2e84e..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/activity_callback.h +++ /dev/null | |||
| @@ -1,23 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H | ||
| 2 | #define JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * The callback to be executed "periodically", in response to some amount of | ||
| 6 | * allocator activity. | ||
| 7 | * | ||
| 8 | * This callback need not be computing any sort of peak (although that's the | ||
| 9 | * intended first use case), but we drive it from the peak counter, so it's | ||
| 10 | * tidier to keep it here. | ||
| 11 | * | ||
| 12 | * The calls to this thunk get driven by the peak_event module. | ||
| 13 | */ | ||
| 14 | #define ACTIVITY_CALLBACK_THUNK_INITIALIZER {NULL, NULL} | ||
| 15 | typedef void (*activity_callback_t)(void *uctx, uint64_t allocated, | ||
| 16 | uint64_t deallocated); | ||
| 17 | typedef struct activity_callback_thunk_s activity_callback_thunk_t; | ||
| 18 | struct activity_callback_thunk_s { | ||
| 19 | activity_callback_t callback; | ||
| 20 | void *uctx; | ||
| 21 | }; | ||
| 22 | |||
| 23 | #endif /* JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H */ | ||
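The activity_callback.h header removed above only declares the thunk shape; below is a minimal standalone sketch of how such a thunk could be filled in and invoked. The `peak_tracker_t` type, `track_peak` function, and the hand-driven calls in `main` are hypothetical — in jemalloc the calls are driven by the peak_event module.

```c
#include <stdint.h>
#include <stdio.h>

/* Types mirrored from the deleted activity_callback.h above. */
typedef void (*activity_callback_t)(void *uctx, uint64_t allocated,
    uint64_t deallocated);
typedef struct activity_callback_thunk_s activity_callback_thunk_t;
struct activity_callback_thunk_s {
	activity_callback_t callback;
	void *uctx;
};

/* Hypothetical user context: track the peak of net bytes allocated. */
typedef struct {
	uint64_t peak;
} peak_tracker_t;

static void
track_peak(void *uctx, uint64_t allocated, uint64_t deallocated) {
	peak_tracker_t *t = (peak_tracker_t *)uctx;
	uint64_t net = allocated - deallocated;
	if (net > t->peak) {
		t->peak = net;
	}
}

int
main(void) {
	peak_tracker_t tracker = {0};
	activity_callback_thunk_t thunk = {track_peak, &tracker};

	/* In jemalloc the peak_event module would make these calls; here we
	 * simulate a few "periodic" invocations by hand. */
	thunk.callback(thunk.uctx, 1024, 0);
	thunk.callback(thunk.uctx, 4096, 2048);
	printf("peak net allocation: %llu bytes\n",
	    (unsigned long long)tracker.peak);
	return 0;
}
```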
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_externs.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_externs.h deleted file mode 100644 index e6fceaa..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_externs.h +++ /dev/null | |||
| @@ -1,121 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H | ||
| 2 | #define JEMALLOC_INTERNAL_ARENA_EXTERNS_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/bin.h" | ||
| 5 | #include "jemalloc/internal/div.h" | ||
| 6 | #include "jemalloc/internal/extent_dss.h" | ||
| 7 | #include "jemalloc/internal/hook.h" | ||
| 8 | #include "jemalloc/internal/pages.h" | ||
| 9 | #include "jemalloc/internal/stats.h" | ||
| 10 | |||
| 11 | /* | ||
| 12 | * When the number of pages to be purged exceeds this threshold, a deferred | ||
| 13 | * purge should happen. | ||
| 14 | */ | ||
| 15 | #define ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD UINT64_C(1024) | ||
| 16 | |||
| 17 | extern ssize_t opt_dirty_decay_ms; | ||
| 18 | extern ssize_t opt_muzzy_decay_ms; | ||
| 19 | |||
| 20 | extern percpu_arena_mode_t opt_percpu_arena; | ||
| 21 | extern const char *percpu_arena_mode_names[]; | ||
| 22 | |||
| 23 | extern div_info_t arena_binind_div_info[SC_NBINS]; | ||
| 24 | |||
| 25 | extern malloc_mutex_t arenas_lock; | ||
| 26 | extern emap_t arena_emap_global; | ||
| 27 | |||
| 28 | extern size_t opt_oversize_threshold; | ||
| 29 | extern size_t oversize_threshold; | ||
| 30 | |||
| 31 | /* | ||
| 32 | * arena_bin_offsets[binind] is the offset of the first bin shard for size class | ||
| 33 | * binind. | ||
| 34 | */ | ||
| 35 | extern uint32_t arena_bin_offsets[SC_NBINS]; | ||
| 36 | |||
| 37 | void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, | ||
| 38 | unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, | ||
| 39 | ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy); | ||
| 40 | void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, | ||
| 41 | const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, | ||
| 42 | size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, | ||
| 43 | bin_stats_data_t *bstats, arena_stats_large_t *lstats, | ||
| 44 | pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats); | ||
| 45 | void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena); | ||
| 46 | edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, | ||
| 47 | size_t usize, size_t alignment, bool zero); | ||
| 48 | void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, | ||
| 49 | edata_t *edata); | ||
| 50 | void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, | ||
| 51 | edata_t *edata, size_t oldsize); | ||
| 52 | void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, | ||
| 53 | edata_t *edata, size_t oldsize); | ||
| 54 | bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state, | ||
| 55 | ssize_t decay_ms); | ||
| 56 | ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state); | ||
| 57 | void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, | ||
| 58 | bool all); | ||
| 59 | uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena); | ||
| 60 | void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena); | ||
| 61 | void arena_reset(tsd_t *tsd, arena_t *arena); | ||
| 62 | void arena_destroy(tsd_t *tsd, arena_t *arena); | ||
| 63 | void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena, | ||
| 64 | cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind, | ||
| 65 | const unsigned nfill); | ||
| 66 | |||
| 67 | void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, | ||
| 68 | szind_t ind, bool zero); | ||
| 69 | void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, | ||
| 70 | size_t alignment, bool zero, tcache_t *tcache); | ||
| 71 | void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize); | ||
| 72 | void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, | ||
| 73 | bool slow_path); | ||
| 74 | void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab); | ||
| 75 | |||
| 76 | void arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena, | ||
| 77 | edata_t *slab, bin_t *bin); | ||
| 78 | void arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena, | ||
| 79 | edata_t *slab, bin_t *bin); | ||
| 80 | void arena_dalloc_small(tsdn_t *tsdn, void *ptr); | ||
| 81 | bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, | ||
| 82 | size_t extra, bool zero, size_t *newsize); | ||
| 83 | void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, | ||
| 84 | size_t size, size_t alignment, bool zero, tcache_t *tcache, | ||
| 85 | hook_ralloc_args_t *hook_args); | ||
| 86 | dss_prec_t arena_dss_prec_get(arena_t *arena); | ||
| 87 | ehooks_t *arena_get_ehooks(arena_t *arena); | ||
| 88 | extent_hooks_t *arena_set_extent_hooks(tsd_t *tsd, arena_t *arena, | ||
| 89 | extent_hooks_t *extent_hooks); | ||
| 90 | bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); | ||
| 91 | ssize_t arena_dirty_decay_ms_default_get(void); | ||
| 92 | bool arena_dirty_decay_ms_default_set(ssize_t decay_ms); | ||
| 93 | ssize_t arena_muzzy_decay_ms_default_get(void); | ||
| 94 | bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms); | ||
| 95 | bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, | ||
| 96 | size_t *old_limit, size_t *new_limit); | ||
| 97 | unsigned arena_nthreads_get(arena_t *arena, bool internal); | ||
| 98 | void arena_nthreads_inc(arena_t *arena, bool internal); | ||
| 99 | void arena_nthreads_dec(arena_t *arena, bool internal); | ||
| 100 | arena_t *arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config); | ||
| 101 | bool arena_init_huge(void); | ||
| 102 | bool arena_is_huge(unsigned arena_ind); | ||
| 103 | arena_t *arena_choose_huge(tsd_t *tsd); | ||
| 104 | bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind, | ||
| 105 | unsigned *binshard); | ||
| 106 | size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind, | ||
| 107 | void **ptrs, size_t nfill, bool zero); | ||
| 108 | bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa); | ||
| 109 | void arena_prefork0(tsdn_t *tsdn, arena_t *arena); | ||
| 110 | void arena_prefork1(tsdn_t *tsdn, arena_t *arena); | ||
| 111 | void arena_prefork2(tsdn_t *tsdn, arena_t *arena); | ||
| 112 | void arena_prefork3(tsdn_t *tsdn, arena_t *arena); | ||
| 113 | void arena_prefork4(tsdn_t *tsdn, arena_t *arena); | ||
| 114 | void arena_prefork5(tsdn_t *tsdn, arena_t *arena); | ||
| 115 | void arena_prefork6(tsdn_t *tsdn, arena_t *arena); | ||
| 116 | void arena_prefork7(tsdn_t *tsdn, arena_t *arena); | ||
| 117 | void arena_prefork8(tsdn_t *tsdn, arena_t *arena); | ||
| 118 | void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); | ||
| 119 | void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); | ||
| 120 | |||
| 121 | #endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_inlines_a.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_inlines_a.h deleted file mode 100644 index 8568358..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_inlines_a.h +++ /dev/null | |||
| @@ -1,24 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H | ||
| 2 | #define JEMALLOC_INTERNAL_ARENA_INLINES_A_H | ||
| 3 | |||
| 4 | static inline unsigned | ||
| 5 | arena_ind_get(const arena_t *arena) { | ||
| 6 | return arena->ind; | ||
| 7 | } | ||
| 8 | |||
| 9 | static inline void | ||
| 10 | arena_internal_add(arena_t *arena, size_t size) { | ||
| 11 | atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED); | ||
| 12 | } | ||
| 13 | |||
| 14 | static inline void | ||
| 15 | arena_internal_sub(arena_t *arena, size_t size) { | ||
| 16 | atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED); | ||
| 17 | } | ||
| 18 | |||
| 19 | static inline size_t | ||
| 20 | arena_internal_get(arena_t *arena) { | ||
| 21 | return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED); | ||
| 22 | } | ||
| 23 | |||
| 24 | #endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */ | ||
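The inline helpers in arena_inlines_a.h just deleted are thin wrappers over relaxed atomic operations on `arena->stats.internal`. Here is a self-contained C11 sketch of the same pattern, assuming nothing from jemalloc; the `fake_stats_t` type and helper names are invented for illustration.

```c
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for arena->stats.internal: a counter updated and read
 * with relaxed ordering, since it is only a statistic and is never used to
 * synchronize other memory accesses. */
typedef struct {
	atomic_size_t internal;
} fake_stats_t;

static void
internal_add(fake_stats_t *stats, size_t size) {
	atomic_fetch_add_explicit(&stats->internal, size, memory_order_relaxed);
}

static void
internal_sub(fake_stats_t *stats, size_t size) {
	atomic_fetch_sub_explicit(&stats->internal, size, memory_order_relaxed);
}

static size_t
internal_get(fake_stats_t *stats) {
	return atomic_load_explicit(&stats->internal, memory_order_relaxed);
}

int
main(void) {
	fake_stats_t stats = {0};
	internal_add(&stats, 4096);
	internal_sub(&stats, 1024);
	printf("internal bytes: %zu\n", internal_get(&stats)); /* 3072 */
	return 0;
}
```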
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h deleted file mode 100644 index fa81537..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h +++ /dev/null | |||
| @@ -1,550 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H | ||
| 2 | #define JEMALLOC_INTERNAL_ARENA_INLINES_B_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/div.h" | ||
| 5 | #include "jemalloc/internal/emap.h" | ||
| 6 | #include "jemalloc/internal/jemalloc_internal_types.h" | ||
| 7 | #include "jemalloc/internal/mutex.h" | ||
| 8 | #include "jemalloc/internal/rtree.h" | ||
| 9 | #include "jemalloc/internal/safety_check.h" | ||
| 10 | #include "jemalloc/internal/sc.h" | ||
| 11 | #include "jemalloc/internal/sz.h" | ||
| 12 | #include "jemalloc/internal/ticker.h" | ||
| 13 | |||
| 14 | static inline arena_t * | ||
| 15 | arena_get_from_edata(edata_t *edata) { | ||
| 16 | return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)], | ||
| 17 | ATOMIC_RELAXED); | ||
| 18 | } | ||
| 19 | |||
| 20 | JEMALLOC_ALWAYS_INLINE arena_t * | ||
| 21 | arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) { | ||
| 22 | if (arena != NULL) { | ||
| 23 | return arena; | ||
| 24 | } | ||
| 25 | |||
| 26 | /* | ||
| 27 | * For huge allocations, use the dedicated huge arena if both are true: | ||
| 28 | * 1) is using auto arena selection (i.e. arena == NULL), and 2) the | ||
| 29 | * thread is not assigned to a manual arena. | ||
| 30 | */ | ||
| 31 | if (unlikely(size >= oversize_threshold)) { | ||
| 32 | arena_t *tsd_arena = tsd_arena_get(tsd); | ||
| 33 | if (tsd_arena == NULL || arena_is_auto(tsd_arena)) { | ||
| 34 | return arena_choose_huge(tsd); | ||
| 35 | } | ||
| 36 | } | ||
| 37 | |||
| 38 | return arena_choose(tsd, NULL); | ||
| 39 | } | ||
| 40 | |||
| 41 | JEMALLOC_ALWAYS_INLINE void | ||
| 42 | arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx, | ||
| 43 | prof_info_t *prof_info, bool reset_recent) { | ||
| 44 | cassert(config_prof); | ||
| 45 | assert(ptr != NULL); | ||
| 46 | assert(prof_info != NULL); | ||
| 47 | |||
| 48 | edata_t *edata = NULL; | ||
| 49 | bool is_slab; | ||
| 50 | |||
| 51 | /* Static check. */ | ||
| 52 | if (alloc_ctx == NULL) { | ||
| 53 | edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, | ||
| 54 | ptr); | ||
| 55 | is_slab = edata_slab_get(edata); | ||
| 56 | } else if (unlikely(!(is_slab = alloc_ctx->slab))) { | ||
| 57 | edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, | ||
| 58 | ptr); | ||
| 59 | } | ||
| 60 | |||
| 61 | if (unlikely(!is_slab)) { | ||
| 62 | /* edata must have been initialized at this point. */ | ||
| 63 | assert(edata != NULL); | ||
| 64 | large_prof_info_get(tsd, edata, prof_info, reset_recent); | ||
| 65 | } else { | ||
| 66 | prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U; | ||
| 67 | /* | ||
| 68 | * No need to set other fields in prof_info; they will never be | ||
| 69 | * accessed if (uintptr_t)alloc_tctx == (uintptr_t)1U. | ||
| 70 | */ | ||
| 71 | } | ||
| 72 | } | ||
| 73 | |||
| 74 | JEMALLOC_ALWAYS_INLINE void | ||
| 75 | arena_prof_tctx_reset(tsd_t *tsd, const void *ptr, | ||
| 76 | emap_alloc_ctx_t *alloc_ctx) { | ||
| 77 | cassert(config_prof); | ||
| 78 | assert(ptr != NULL); | ||
| 79 | |||
| 80 | /* Static check. */ | ||
| 81 | if (alloc_ctx == NULL) { | ||
| 82 | edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), | ||
| 83 | &arena_emap_global, ptr); | ||
| 84 | if (unlikely(!edata_slab_get(edata))) { | ||
| 85 | large_prof_tctx_reset(edata); | ||
| 86 | } | ||
| 87 | } else { | ||
| 88 | if (unlikely(!alloc_ctx->slab)) { | ||
| 89 | edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), | ||
| 90 | &arena_emap_global, ptr); | ||
| 91 | large_prof_tctx_reset(edata); | ||
| 92 | } | ||
| 93 | } | ||
| 94 | } | ||
| 95 | |||
| 96 | JEMALLOC_ALWAYS_INLINE void | ||
| 97 | arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) { | ||
| 98 | cassert(config_prof); | ||
| 99 | assert(ptr != NULL); | ||
| 100 | |||
| 101 | edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, | ||
| 102 | ptr); | ||
| 103 | assert(!edata_slab_get(edata)); | ||
| 104 | |||
| 105 | large_prof_tctx_reset(edata); | ||
| 106 | } | ||
| 107 | |||
| 108 | JEMALLOC_ALWAYS_INLINE void | ||
| 109 | arena_prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, | ||
| 110 | size_t size) { | ||
| 111 | cassert(config_prof); | ||
| 112 | |||
| 113 | assert(!edata_slab_get(edata)); | ||
| 114 | large_prof_info_set(edata, tctx, size); | ||
| 115 | } | ||
| 116 | |||
| 117 | JEMALLOC_ALWAYS_INLINE void | ||
| 118 | arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) { | ||
| 119 | if (unlikely(tsdn_null(tsdn))) { | ||
| 120 | return; | ||
| 121 | } | ||
| 122 | tsd_t *tsd = tsdn_tsd(tsdn); | ||
| 123 | /* | ||
| 124 | * We use the ticker_geom_t to avoid having per-arena state in the tsd. | ||
| 125 | * Instead of having a countdown-until-decay timer running for every | ||
| 126 | * arena in every thread, we flip a coin once per tick, whose | ||
| 127 | * probability of coming up heads is 1/nticks; this is effectively the | ||
| 128 | * operation of the ticker_geom_t. Each arena has the same chance of a | ||
| 129 | * coinflip coming up heads (1/ARENA_DECAY_NTICKS_PER_UPDATE), so we can | ||
| 130 | * use a single ticker for all of them. | ||
| 131 | */ | ||
| 132 | ticker_geom_t *decay_ticker = tsd_arena_decay_tickerp_get(tsd); | ||
| 133 | uint64_t *prng_state = tsd_prng_statep_get(tsd); | ||
| 134 | if (unlikely(ticker_geom_ticks(decay_ticker, prng_state, nticks))) { | ||
| 135 | arena_decay(tsdn, arena, false, false); | ||
| 136 | } | ||
| 137 | } | ||
| 138 | |||
| 139 | JEMALLOC_ALWAYS_INLINE void | ||
| 140 | arena_decay_tick(tsdn_t *tsdn, arena_t *arena) { | ||
| 141 | arena_decay_ticks(tsdn, arena, 1); | ||
| 142 | } | ||
| 143 | |||
| 144 | JEMALLOC_ALWAYS_INLINE void * | ||
| 145 | arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, | ||
| 146 | tcache_t *tcache, bool slow_path) { | ||
| 147 | assert(!tsdn_null(tsdn) || tcache == NULL); | ||
| 148 | |||
| 149 | if (likely(tcache != NULL)) { | ||
| 150 | if (likely(size <= SC_SMALL_MAXCLASS)) { | ||
| 151 | return tcache_alloc_small(tsdn_tsd(tsdn), arena, | ||
| 152 | tcache, size, ind, zero, slow_path); | ||
| 153 | } | ||
| 154 | if (likely(size <= tcache_maxclass)) { | ||
| 155 | return tcache_alloc_large(tsdn_tsd(tsdn), arena, | ||
| 156 | tcache, size, ind, zero, slow_path); | ||
| 157 | } | ||
| 158 | /* (size > tcache_maxclass) case falls through. */ | ||
| 159 | assert(size > tcache_maxclass); | ||
| 160 | } | ||
| 161 | |||
| 162 | return arena_malloc_hard(tsdn, arena, size, ind, zero); | ||
| 163 | } | ||
| 164 | |||
| 165 | JEMALLOC_ALWAYS_INLINE arena_t * | ||
| 166 | arena_aalloc(tsdn_t *tsdn, const void *ptr) { | ||
| 167 | edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); | ||
| 168 | unsigned arena_ind = edata_arena_ind_get(edata); | ||
| 169 | return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_RELAXED); | ||
| 170 | } | ||
| 171 | |||
| 172 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 173 | arena_salloc(tsdn_t *tsdn, const void *ptr) { | ||
| 174 | assert(ptr != NULL); | ||
| 175 | emap_alloc_ctx_t alloc_ctx; | ||
| 176 | emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx); | ||
| 177 | assert(alloc_ctx.szind != SC_NSIZES); | ||
| 178 | |||
| 179 | return sz_index2size(alloc_ctx.szind); | ||
| 180 | } | ||
| 181 | |||
| 182 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 183 | arena_vsalloc(tsdn_t *tsdn, const void *ptr) { | ||
| 184 | /* | ||
| 185 | * Return 0 if ptr is not within an extent managed by jemalloc. This | ||
| 186 | * function has two extra costs relative to isalloc(): | ||
| 187 | * - The rtree calls cannot claim to be dependent lookups, which induces | ||
| 188 | * rtree lookup load dependencies. | ||
| 189 | * - The lookup may fail, so there is an extra branch to check for | ||
| 190 | * failure. | ||
| 191 | */ | ||
| 192 | |||
| 193 | emap_full_alloc_ctx_t full_alloc_ctx; | ||
| 194 | bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global, | ||
| 195 | ptr, &full_alloc_ctx); | ||
| 196 | if (missing) { | ||
| 197 | return 0; | ||
| 198 | } | ||
| 199 | |||
| 200 | if (full_alloc_ctx.edata == NULL) { | ||
| 201 | return 0; | ||
| 202 | } | ||
| 203 | assert(edata_state_get(full_alloc_ctx.edata) == extent_state_active); | ||
| 204 | /* Only slab members should be looked up via interior pointers. */ | ||
| 205 | assert(edata_addr_get(full_alloc_ctx.edata) == ptr | ||
| 206 | || edata_slab_get(full_alloc_ctx.edata)); | ||
| 207 | |||
| 208 | assert(full_alloc_ctx.szind != SC_NSIZES); | ||
| 209 | |||
| 210 | return sz_index2size(full_alloc_ctx.szind); | ||
| 211 | } | ||
| 212 | |||
| 213 | JEMALLOC_ALWAYS_INLINE bool | ||
| 214 | large_dalloc_safety_checks(edata_t *edata, void *ptr, szind_t szind) { | ||
| 215 | if (!config_opt_safety_checks) { | ||
| 216 | return false; | ||
| 217 | } | ||
| 218 | |||
| 219 | /* | ||
| 220 | * Eagerly detect double free and sized dealloc bugs for large sizes. | ||
| 221 | * The cost is low enough (as edata will be accessed anyway) to be | ||
| 222 | * enabled all the time. | ||
| 223 | */ | ||
| 224 | if (unlikely(edata == NULL || | ||
| 225 | edata_state_get(edata) != extent_state_active)) { | ||
| 226 | safety_check_fail("Invalid deallocation detected: " | ||
| 227 | "pages being freed (%p) not currently active, " | ||
| 228 | "possibly caused by double free bugs.", | ||
| 229 | (uintptr_t)edata_addr_get(edata)); | ||
| 230 | return true; | ||
| 231 | } | ||
| 232 | size_t input_size = sz_index2size(szind); | ||
| 233 | if (unlikely(input_size != edata_usize_get(edata))) { | ||
| 234 | safety_check_fail_sized_dealloc(/* current_dealloc */ true, ptr, | ||
| 235 | /* true_size */ edata_usize_get(edata), input_size); | ||
| 236 | return true; | ||
| 237 | } | ||
| 238 | |||
| 239 | return false; | ||
| 240 | } | ||
| 241 | |||
| 242 | static inline void | ||
| 243 | arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) { | ||
| 244 | if (config_prof && unlikely(szind < SC_NBINS)) { | ||
| 245 | arena_dalloc_promoted(tsdn, ptr, NULL, true); | ||
| 246 | } else { | ||
| 247 | edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, | ||
| 248 | ptr); | ||
| 249 | if (large_dalloc_safety_checks(edata, ptr, szind)) { | ||
| 250 | /* See the comment in isfree. */ | ||
| 251 | return; | ||
| 252 | } | ||
| 253 | large_dalloc(tsdn, edata); | ||
| 254 | } | ||
| 255 | } | ||
| 256 | |||
| 257 | static inline void | ||
| 258 | arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) { | ||
| 259 | assert(ptr != NULL); | ||
| 260 | |||
| 261 | emap_alloc_ctx_t alloc_ctx; | ||
| 262 | emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx); | ||
| 263 | |||
| 264 | if (config_debug) { | ||
| 265 | edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, | ||
| 266 | ptr); | ||
| 267 | assert(alloc_ctx.szind == edata_szind_get(edata)); | ||
| 268 | assert(alloc_ctx.szind < SC_NSIZES); | ||
| 269 | assert(alloc_ctx.slab == edata_slab_get(edata)); | ||
| 270 | } | ||
| 271 | |||
| 272 | if (likely(alloc_ctx.slab)) { | ||
| 273 | /* Small allocation. */ | ||
| 274 | arena_dalloc_small(tsdn, ptr); | ||
| 275 | } else { | ||
| 276 | arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind); | ||
| 277 | } | ||
| 278 | } | ||
| 279 | |||
| 280 | JEMALLOC_ALWAYS_INLINE void | ||
| 281 | arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind, | ||
| 282 | bool slow_path) { | ||
| 283 | if (szind < nhbins) { | ||
| 284 | if (config_prof && unlikely(szind < SC_NBINS)) { | ||
| 285 | arena_dalloc_promoted(tsdn, ptr, tcache, slow_path); | ||
| 286 | } else { | ||
| 287 | tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind, | ||
| 288 | slow_path); | ||
| 289 | } | ||
| 290 | } else { | ||
| 291 | edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, | ||
| 292 | ptr); | ||
| 293 | if (large_dalloc_safety_checks(edata, ptr, szind)) { | ||
| 294 | /* See the comment in isfree. */ | ||
| 295 | return; | ||
| 296 | } | ||
| 297 | large_dalloc(tsdn, edata); | ||
| 298 | } | ||
| 299 | } | ||
| 300 | |||
| 301 | JEMALLOC_ALWAYS_INLINE void | ||
| 302 | arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, | ||
| 303 | emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) { | ||
| 304 | assert(!tsdn_null(tsdn) || tcache == NULL); | ||
| 305 | assert(ptr != NULL); | ||
| 306 | |||
| 307 | if (unlikely(tcache == NULL)) { | ||
| 308 | arena_dalloc_no_tcache(tsdn, ptr); | ||
| 309 | return; | ||
| 310 | } | ||
| 311 | |||
| 312 | emap_alloc_ctx_t alloc_ctx; | ||
| 313 | if (caller_alloc_ctx != NULL) { | ||
| 314 | alloc_ctx = *caller_alloc_ctx; | ||
| 315 | } else { | ||
| 316 | util_assume(!tsdn_null(tsdn)); | ||
| 317 | emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, | ||
| 318 | &alloc_ctx); | ||
| 319 | } | ||
| 320 | |||
| 321 | if (config_debug) { | ||
| 322 | edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, | ||
| 323 | ptr); | ||
| 324 | assert(alloc_ctx.szind == edata_szind_get(edata)); | ||
| 325 | assert(alloc_ctx.szind < SC_NSIZES); | ||
| 326 | assert(alloc_ctx.slab == edata_slab_get(edata)); | ||
| 327 | } | ||
| 328 | |||
| 329 | if (likely(alloc_ctx.slab)) { | ||
| 330 | /* Small allocation. */ | ||
| 331 | tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, | ||
| 332 | alloc_ctx.szind, slow_path); | ||
| 333 | } else { | ||
| 334 | arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind, | ||
| 335 | slow_path); | ||
| 336 | } | ||
| 337 | } | ||
| 338 | |||
| 339 | static inline void | ||
| 340 | arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) { | ||
| 341 | assert(ptr != NULL); | ||
| 342 | assert(size <= SC_LARGE_MAXCLASS); | ||
| 343 | |||
| 344 | emap_alloc_ctx_t alloc_ctx; | ||
| 345 | if (!config_prof || !opt_prof) { | ||
| 346 | /* | ||
| 347 | * There is no risk of being confused by a promoted sampled | ||
| 348 | * object, so base szind and slab on the given size. | ||
| 349 | */ | ||
| 350 | alloc_ctx.szind = sz_size2index(size); | ||
| 351 | alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS); | ||
| 352 | } | ||
| 353 | |||
| 354 | if ((config_prof && opt_prof) || config_debug) { | ||
| 355 | emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, | ||
| 356 | &alloc_ctx); | ||
| 357 | |||
| 358 | assert(alloc_ctx.szind == sz_size2index(size)); | ||
| 359 | assert((config_prof && opt_prof) | ||
| 360 | || alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS)); | ||
| 361 | |||
| 362 | if (config_debug) { | ||
| 363 | edata_t *edata = emap_edata_lookup(tsdn, | ||
| 364 | &arena_emap_global, ptr); | ||
| 365 | assert(alloc_ctx.szind == edata_szind_get(edata)); | ||
| 366 | assert(alloc_ctx.slab == edata_slab_get(edata)); | ||
| 367 | } | ||
| 368 | } | ||
| 369 | |||
| 370 | if (likely(alloc_ctx.slab)) { | ||
| 371 | /* Small allocation. */ | ||
| 372 | arena_dalloc_small(tsdn, ptr); | ||
| 373 | } else { | ||
| 374 | arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind); | ||
| 375 | } | ||
| 376 | } | ||
| 377 | |||
| 378 | JEMALLOC_ALWAYS_INLINE void | ||
| 379 | arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, | ||
| 380 | emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) { | ||
| 381 | assert(!tsdn_null(tsdn) || tcache == NULL); | ||
| 382 | assert(ptr != NULL); | ||
| 383 | assert(size <= SC_LARGE_MAXCLASS); | ||
| 384 | |||
| 385 | if (unlikely(tcache == NULL)) { | ||
| 386 | arena_sdalloc_no_tcache(tsdn, ptr, size); | ||
| 387 | return; | ||
| 388 | } | ||
| 389 | |||
| 390 | emap_alloc_ctx_t alloc_ctx; | ||
| 391 | if (config_prof && opt_prof) { | ||
| 392 | if (caller_alloc_ctx == NULL) { | ||
| 393 | /* Uncommon case and should be a static check. */ | ||
| 394 | emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, | ||
| 395 | &alloc_ctx); | ||
| 396 | assert(alloc_ctx.szind == sz_size2index(size)); | ||
| 397 | } else { | ||
| 398 | alloc_ctx = *caller_alloc_ctx; | ||
| 399 | } | ||
| 400 | } else { | ||
| 401 | /* | ||
| 402 | * There is no risk of being confused by a promoted sampled | ||
| 403 | * object, so base szind and slab on the given size. | ||
| 404 | */ | ||
| 405 | alloc_ctx.szind = sz_size2index(size); | ||
| 406 | alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS); | ||
| 407 | } | ||
| 408 | |||
| 409 | if (config_debug) { | ||
| 410 | edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, | ||
| 411 | ptr); | ||
| 412 | assert(alloc_ctx.szind == edata_szind_get(edata)); | ||
| 413 | assert(alloc_ctx.slab == edata_slab_get(edata)); | ||
| 414 | } | ||
| 415 | |||
| 416 | if (likely(alloc_ctx.slab)) { | ||
| 417 | /* Small allocation. */ | ||
| 418 | tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, | ||
| 419 | alloc_ctx.szind, slow_path); | ||
| 420 | } else { | ||
| 421 | arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind, | ||
| 422 | slow_path); | ||
| 423 | } | ||
| 424 | } | ||
| 425 | |||
| 426 | static inline void | ||
| 427 | arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata, | ||
| 428 | size_t alignment) { | ||
| 429 | assert(edata_base_get(edata) == edata_addr_get(edata)); | ||
| 430 | |||
| 431 | if (alignment < PAGE) { | ||
| 432 | unsigned lg_range = LG_PAGE - | ||
| 433 | lg_floor(CACHELINE_CEILING(alignment)); | ||
| 434 | size_t r; | ||
| 435 | if (!tsdn_null(tsdn)) { | ||
| 436 | tsd_t *tsd = tsdn_tsd(tsdn); | ||
| 437 | r = (size_t)prng_lg_range_u64( | ||
| 438 | tsd_prng_statep_get(tsd), lg_range); | ||
| 439 | } else { | ||
| 440 | uint64_t stack_value = (uint64_t)(uintptr_t)&r; | ||
| 441 | r = (size_t)prng_lg_range_u64(&stack_value, lg_range); | ||
| 442 | } | ||
| 443 | uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE - | ||
| 444 | lg_range); | ||
| 445 | edata->e_addr = (void *)((uintptr_t)edata->e_addr + | ||
| 446 | random_offset); | ||
| 447 | assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) == | ||
| 448 | edata->e_addr); | ||
| 449 | } | ||
| 450 | } | ||
| 451 | |||
| 452 | /* | ||
| 453 | * The dalloc bin info contains just the information that the common paths need | ||
| 454 | * during tcache flushes. By force-inlining these paths, and using local copies | ||
| 455 | * of data (so that the compiler knows it's constant), we avoid a whole bunch of | ||
| 456 | * redundant loads and stores by leaving this information in registers. | ||
| 457 | */ | ||
| 458 | typedef struct arena_dalloc_bin_locked_info_s arena_dalloc_bin_locked_info_t; | ||
| 459 | struct arena_dalloc_bin_locked_info_s { | ||
| 460 | div_info_t div_info; | ||
| 461 | uint32_t nregs; | ||
| 462 | uint64_t ndalloc; | ||
| 463 | }; | ||
| 464 | |||
| 465 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 466 | arena_slab_regind(arena_dalloc_bin_locked_info_t *info, szind_t binind, | ||
| 467 | edata_t *slab, const void *ptr) { | ||
| 468 | size_t diff, regind; | ||
| 469 | |||
| 470 | /* Freeing a pointer outside the slab can cause assertion failure. */ | ||
| 471 | assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab)); | ||
| 472 | assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab)); | ||
| 473 | /* Freeing an interior pointer can cause assertion failure. */ | ||
| 474 | assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) % | ||
| 475 | (uintptr_t)bin_infos[binind].reg_size == 0); | ||
| 476 | |||
| 477 | diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)); | ||
| 478 | |||
| 479 | /* Avoid doing division with a variable divisor. */ | ||
| 480 | regind = div_compute(&info->div_info, diff); | ||
| 481 | |||
| 482 | assert(regind < bin_infos[binind].nregs); | ||
| 483 | |||
| 484 | return regind; | ||
| 485 | } | ||
| 486 | |||
| 487 | JEMALLOC_ALWAYS_INLINE void | ||
| 488 | arena_dalloc_bin_locked_begin(arena_dalloc_bin_locked_info_t *info, | ||
| 489 | szind_t binind) { | ||
| 490 | info->div_info = arena_binind_div_info[binind]; | ||
| 491 | info->nregs = bin_infos[binind].nregs; | ||
| 492 | info->ndalloc = 0; | ||
| 493 | } | ||
| 494 | |||
| 495 | /* | ||
| 496 | * Does the deallocation work associated with freeing a single pointer (a | ||
| 497 | * "step") in between a arena_dalloc_bin_locked begin and end call. | ||
| 498 | * | ||
| 499 | * Returns true if arena_slab_dalloc must be called on slab. Doesn't do | ||
| 500 | * stats updates, which happen during finish (this lets running counts get left | ||
| 501 | * in a register). | ||
| 502 | */ | ||
| 503 | JEMALLOC_ALWAYS_INLINE bool | ||
| 504 | arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin, | ||
| 505 | arena_dalloc_bin_locked_info_t *info, szind_t binind, edata_t *slab, | ||
| 506 | void *ptr) { | ||
| 507 | const bin_info_t *bin_info = &bin_infos[binind]; | ||
| 508 | size_t regind = arena_slab_regind(info, binind, slab, ptr); | ||
| 509 | slab_data_t *slab_data = edata_slab_data_get(slab); | ||
| 510 | |||
| 511 | assert(edata_nfree_get(slab) < bin_info->nregs); | ||
| 512 | /* Freeing an unallocated pointer can cause assertion failure. */ | ||
| 513 | assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind)); | ||
| 514 | |||
| 515 | bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind); | ||
| 516 | edata_nfree_inc(slab); | ||
| 517 | |||
| 518 | if (config_stats) { | ||
| 519 | info->ndalloc++; | ||
| 520 | } | ||
| 521 | |||
| 522 | unsigned nfree = edata_nfree_get(slab); | ||
| 523 | if (nfree == bin_info->nregs) { | ||
| 524 | arena_dalloc_bin_locked_handle_newly_empty(tsdn, arena, slab, | ||
| 525 | bin); | ||
| 526 | return true; | ||
| 527 | } else if (nfree == 1 && slab != bin->slabcur) { | ||
| 528 | arena_dalloc_bin_locked_handle_newly_nonempty(tsdn, arena, slab, | ||
| 529 | bin); | ||
| 530 | } | ||
| 531 | return false; | ||
| 532 | } | ||
| 533 | |||
| 534 | JEMALLOC_ALWAYS_INLINE void | ||
| 535 | arena_dalloc_bin_locked_finish(tsdn_t *tsdn, arena_t *arena, bin_t *bin, | ||
| 536 | arena_dalloc_bin_locked_info_t *info) { | ||
| 537 | if (config_stats) { | ||
| 538 | bin->stats.ndalloc += info->ndalloc; | ||
| 539 | assert(bin->stats.curregs >= (size_t)info->ndalloc); | ||
| 540 | bin->stats.curregs -= (size_t)info->ndalloc; | ||
| 541 | } | ||
| 542 | } | ||
| 543 | |||
| 544 | static inline bin_t * | ||
| 545 | arena_get_bin(arena_t *arena, szind_t binind, unsigned binshard) { | ||
| 546 | bin_t *shard0 = (bin_t *)((uintptr_t)arena + arena_bin_offsets[binind]); | ||
| 547 | return shard0 + binshard; | ||
| 548 | } | ||
| 549 | |||
| 550 | #endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */ | ||
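The comment in `arena_decay_ticks` above describes replacing per-arena countdown timers with a single probabilistic ticker. The sketch below is a deliberately simplified illustration of that expected-rate equivalence using `rand()`; it is not jemalloc's `ticker_geom_t`, which draws geometrically distributed tick counts from a per-thread PRNG state.

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NTICKS_PER_UPDATE 1000

/* Simplified illustration (not jemalloc's ticker_geom_t): rather than keeping
 * a countdown of NTICKS_PER_UPDATE events per arena, fire the slow path on
 * each call with probability nticks / NTICKS_PER_UPDATE.  The expected firing
 * rate matches the countdown, but only a single PRNG state is needed no
 * matter how many arenas a thread touches. */
static bool
should_run_decay(unsigned nticks) {
	return (unsigned)(rand() % NTICKS_PER_UPDATE) < nticks;
}

int
main(void) {
	srand(42);
	unsigned fired = 0;
	for (int i = 0; i < 1000000; i++) {
		if (should_run_decay(1)) {
			fired++;
		}
	}
	/* Expect roughly 1000000 / NTICKS_PER_UPDATE = ~1000 firings. */
	printf("decay ran %u times\n", fired);
	return 0;
}
```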
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_stats.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_stats.h deleted file mode 100644 index 15f1d34..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_stats.h +++ /dev/null | |||
| @@ -1,114 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_ARENA_STATS_H | ||
| 2 | #define JEMALLOC_INTERNAL_ARENA_STATS_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/atomic.h" | ||
| 5 | #include "jemalloc/internal/lockedint.h" | ||
| 6 | #include "jemalloc/internal/mutex.h" | ||
| 7 | #include "jemalloc/internal/mutex_prof.h" | ||
| 8 | #include "jemalloc/internal/pa.h" | ||
| 9 | #include "jemalloc/internal/sc.h" | ||
| 10 | |||
| 11 | JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS | ||
| 12 | |||
| 13 | typedef struct arena_stats_large_s arena_stats_large_t; | ||
| 14 | struct arena_stats_large_s { | ||
| 15 | /* | ||
| 16 | * Total number of allocation/deallocation requests served directly by | ||
| 17 | * the arena. | ||
| 18 | */ | ||
| 19 | locked_u64_t nmalloc; | ||
| 20 | locked_u64_t ndalloc; | ||
| 21 | |||
| 22 | /* | ||
| 23 | * Number of allocation requests that correspond to this size class. | ||
| 24 | * This includes requests served by tcache, though tcache only | ||
| 25 | * periodically merges into this counter. | ||
| 26 | */ | ||
| 27 | locked_u64_t nrequests; /* Partially derived. */ | ||
| 28 | /* | ||
| 29 | * Number of tcache fills / flushes for large (similarly, periodically | ||
| 30 | * merged). Note that there is no large tcache batch-fill currently | ||
| 31 | * (i.e. only fill 1 at a time); however flush may be batched. | ||
| 32 | */ | ||
| 33 | locked_u64_t nfills; /* Partially derived. */ | ||
| 34 | locked_u64_t nflushes; /* Partially derived. */ | ||
| 35 | |||
| 36 | /* Current number of allocations of this size class. */ | ||
| 37 | size_t curlextents; /* Derived. */ | ||
| 38 | }; | ||
| 39 | |||
| 40 | /* | ||
| 41 | * Arena stats. Note that fields marked "derived" are not directly maintained | ||
| 42 | * within the arena code; rather their values are derived during stats merge | ||
| 43 | * requests. | ||
| 44 | */ | ||
| 45 | typedef struct arena_stats_s arena_stats_t; | ||
| 46 | struct arena_stats_s { | ||
| 47 | LOCKEDINT_MTX_DECLARE(mtx) | ||
| 48 | |||
| 49 | /* | ||
| 50 | * resident includes the base stats -- that's why it lives here and not | ||
| 51 | * in pa_shard_stats_t. | ||
| 52 | */ | ||
| 53 | size_t base; /* Derived. */ | ||
| 54 | size_t resident; /* Derived. */ | ||
| 55 | size_t metadata_thp; /* Derived. */ | ||
| 56 | size_t mapped; /* Derived. */ | ||
| 57 | |||
| 58 | atomic_zu_t internal; | ||
| 59 | |||
| 60 | size_t allocated_large; /* Derived. */ | ||
| 61 | uint64_t nmalloc_large; /* Derived. */ | ||
| 62 | uint64_t ndalloc_large; /* Derived. */ | ||
| 63 | uint64_t nfills_large; /* Derived. */ | ||
| 64 | uint64_t nflushes_large; /* Derived. */ | ||
| 65 | uint64_t nrequests_large; /* Derived. */ | ||
| 66 | |||
| 67 | /* | ||
| 68 | * The stats logically owned by the pa_shard in the same arena. This | ||
| 69 | * lives here only because it's convenient for the purposes of the ctl | ||
| 70 | * module -- it only knows about the single arena_stats. | ||
| 71 | */ | ||
| 72 | pa_shard_stats_t pa_shard_stats; | ||
| 73 | |||
| 74 | /* Number of bytes cached in tcache associated with this arena. */ | ||
| 75 | size_t tcache_bytes; /* Derived. */ | ||
| 76 | size_t tcache_stashed_bytes; /* Derived. */ | ||
| 77 | |||
| 78 | mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]; | ||
| 79 | |||
| 80 | /* One element for each large size class. */ | ||
| 81 | arena_stats_large_t lstats[SC_NSIZES - SC_NBINS]; | ||
| 82 | |||
| 83 | /* Arena uptime. */ | ||
| 84 | nstime_t uptime; | ||
| 85 | }; | ||
| 86 | |||
| 87 | static inline bool | ||
| 88 | arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) { | ||
| 89 | if (config_debug) { | ||
| 90 | for (size_t i = 0; i < sizeof(arena_stats_t); i++) { | ||
| 91 | assert(((char *)arena_stats)[i] == 0); | ||
| 92 | } | ||
| 93 | } | ||
| 94 | if (LOCKEDINT_MTX_INIT(arena_stats->mtx, "arena_stats", | ||
| 95 | WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) { | ||
| 96 | return true; | ||
| 97 | } | ||
| 98 | /* Memory is zeroed, so there is no need to clear stats. */ | ||
| 99 | return false; | ||
| 100 | } | ||
| 101 | |||
| 102 | static inline void | ||
| 103 | arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats, | ||
| 104 | szind_t szind, uint64_t nrequests) { | ||
| 105 | LOCKEDINT_MTX_LOCK(tsdn, arena_stats->mtx); | ||
| 106 | arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS]; | ||
| 107 | locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx), | ||
| 108 | &lstats->nrequests, nrequests); | ||
| 109 | locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx), | ||
| 110 | &lstats->nflushes, 1); | ||
| 111 | LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx); | ||
| 112 | } | ||
| 113 | |||
| 114 | #endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */ | ||
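`arena_stats_large_flush_nrequests_add` above shows the locked-counter idiom: take the stats mutex, bump the relevant `locked_u64_t` fields, release. A hypothetical stand-alone version of that shape using a plain pthread mutex and `uint64_t` fields (jemalloc's `locked_u64_t` may instead use atomics when 64-bit atomics are available):

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the locked_u64_t pattern: counters that are only
 * read and written while holding the stats mutex. */
typedef struct {
	pthread_mutex_t mtx;
	uint64_t nrequests;
	uint64_t nflushes;
} fake_lstats_t;

static void
flush_nrequests_add(fake_lstats_t *lstats, uint64_t nrequests) {
	pthread_mutex_lock(&lstats->mtx);
	lstats->nrequests += nrequests;
	lstats->nflushes += 1;
	pthread_mutex_unlock(&lstats->mtx);
}

int
main(void) {
	fake_lstats_t lstats = {PTHREAD_MUTEX_INITIALIZER, 0, 0};
	flush_nrequests_add(&lstats, 32);
	flush_nrequests_add(&lstats, 8);
	printf("nrequests=%llu nflushes=%llu\n",
	    (unsigned long long)lstats.nrequests,
	    (unsigned long long)lstats.nflushes);
	return 0;
}
```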
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_structs.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_structs.h deleted file mode 100644 index e2a5a40..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_structs.h +++ /dev/null | |||
| @@ -1,101 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_H | ||
| 2 | #define JEMALLOC_INTERNAL_ARENA_STRUCTS_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/arena_stats.h" | ||
| 5 | #include "jemalloc/internal/atomic.h" | ||
| 6 | #include "jemalloc/internal/bin.h" | ||
| 7 | #include "jemalloc/internal/bitmap.h" | ||
| 8 | #include "jemalloc/internal/counter.h" | ||
| 9 | #include "jemalloc/internal/ecache.h" | ||
| 10 | #include "jemalloc/internal/edata_cache.h" | ||
| 11 | #include "jemalloc/internal/extent_dss.h" | ||
| 12 | #include "jemalloc/internal/jemalloc_internal_types.h" | ||
| 13 | #include "jemalloc/internal/mutex.h" | ||
| 14 | #include "jemalloc/internal/nstime.h" | ||
| 15 | #include "jemalloc/internal/pa.h" | ||
| 16 | #include "jemalloc/internal/ql.h" | ||
| 17 | #include "jemalloc/internal/sc.h" | ||
| 18 | #include "jemalloc/internal/ticker.h" | ||
| 19 | |||
| 20 | struct arena_s { | ||
| 21 | /* | ||
| 22 | * Number of threads currently assigned to this arena. Each thread has | ||
| 23 | * two distinct assignments, one for application-serving allocation, and | ||
| 24 | * the other for internal metadata allocation. Internal metadata must | ||
| 25 | * not be allocated from arenas explicitly created via the arenas.create | ||
| 26 | * mallctl, because the arena.<i>.reset mallctl indiscriminately | ||
| 27 | * discards all allocations for the affected arena. | ||
| 28 | * | ||
| 29 | * 0: Application allocation. | ||
| 30 | * 1: Internal metadata allocation. | ||
| 31 | * | ||
| 32 | * Synchronization: atomic. | ||
| 33 | */ | ||
| 34 | atomic_u_t nthreads[2]; | ||
| 35 | |||
| 36 | /* Next bin shard for binding new threads. Synchronization: atomic. */ | ||
| 37 | atomic_u_t binshard_next; | ||
| 38 | |||
| 39 | /* | ||
| 40 | * When percpu_arena is enabled, to amortize the cost of reading / | ||
| 41 | * updating the current CPU id, track the most recent thread accessing | ||
| 42 | * this arena, and only read CPU if there is a mismatch. | ||
| 43 | */ | ||
| 44 | tsdn_t *last_thd; | ||
| 45 | |||
| 46 | /* Synchronization: internal. */ | ||
| 47 | arena_stats_t stats; | ||
| 48 | |||
| 49 | /* | ||
| 50 | * Lists of tcaches and cache_bin_array_descriptors for extant threads | ||
| 51 | * associated with this arena. Stats from these are merged | ||
| 52 | * incrementally, and at exit if opt_stats_print is enabled. | ||
| 53 | * | ||
| 54 | * Synchronization: tcache_ql_mtx. | ||
| 55 | */ | ||
| 56 | ql_head(tcache_slow_t) tcache_ql; | ||
| 57 | ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql; | ||
| 58 | malloc_mutex_t tcache_ql_mtx; | ||
| 59 | |||
| 60 | /* | ||
| 61 | * Represents a dss_prec_t, but atomically. | ||
| 62 | * | ||
| 63 | * Synchronization: atomic. | ||
| 64 | */ | ||
| 65 | atomic_u_t dss_prec; | ||
| 66 | |||
| 67 | /* | ||
| 68 | * Extant large allocations. | ||
| 69 | * | ||
| 70 | * Synchronization: large_mtx. | ||
| 71 | */ | ||
| 72 | edata_list_active_t large; | ||
| 73 | /* Synchronizes all large allocation/update/deallocation. */ | ||
| 74 | malloc_mutex_t large_mtx; | ||
| 75 | |||
| 76 | /* The page-level allocator shard this arena uses. */ | ||
| 77 | pa_shard_t pa_shard; | ||
| 78 | |||
| 79 | /* | ||
| 80 | * A cached copy of base->ind. This can get accessed on hot paths; | ||
| 81 | * looking it up in base requires an extra pointer hop / cache miss. | ||
| 82 | */ | ||
| 83 | unsigned ind; | ||
| 84 | |||
| 85 | /* | ||
| 86 | * Base allocator, from which arena metadata are allocated. | ||
| 87 | * | ||
| 88 | * Synchronization: internal. | ||
| 89 | */ | ||
| 90 | base_t *base; | ||
| 91 | /* Used to determine uptime. Read-only after initialization. */ | ||
| 92 | nstime_t create_time; | ||
| 93 | |||
| 94 | /* | ||
| 95 | * The arena is allocated alongside its bins; really this is a | ||
| 96 | * dynamically sized array determined by the binshard settings. | ||
| 97 | */ | ||
| 98 | bin_t bins[0]; | ||
| 99 | }; | ||
| 100 | |||
| 101 | #endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */ | ||
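`arena_s` ends with `bin_t bins[0]`, so an arena and its bin shards come out of one allocation whose size is determined at runtime. A reduced sketch of that trailing-array layout is below; all names are hypothetical stand-ins, and it uses the standard C99 `[]` flexible array member rather than the zero-length-array extension seen above.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
	unsigned nregs;
} fake_bin_t;

typedef struct {
	unsigned ind;
	unsigned nbins;
	fake_bin_t bins[];	/* C99 flexible array member. */
} fake_arena_t;

/* The header struct and its variable-length tail are carved out of a single
 * allocation, so bins[i] is reachable without a second pointer hop. */
static fake_arena_t *
fake_arena_new(unsigned ind, unsigned nbins) {
	fake_arena_t *arena = malloc(sizeof(fake_arena_t)
	    + nbins * sizeof(fake_bin_t));
	if (arena == NULL) {
		return NULL;
	}
	arena->ind = ind;
	arena->nbins = nbins;
	memset(arena->bins, 0, nbins * sizeof(fake_bin_t));
	return arena;
}

int
main(void) {
	fake_arena_t *arena = fake_arena_new(0, 36);
	if (arena == NULL) {
		return 1;
	}
	printf("arena %u has %u bins\n", arena->ind, arena->nbins);
	free(arena);
	return 0;
}
```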
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_types.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_types.h deleted file mode 100644 index d0e1291..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/arena_types.h +++ /dev/null | |||
| @@ -1,58 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H | ||
| 2 | #define JEMALLOC_INTERNAL_ARENA_TYPES_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/sc.h" | ||
| 5 | |||
| 6 | /* Default decay times in milliseconds. */ | ||
| 7 | #define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000) | ||
| 8 | #define MUZZY_DECAY_MS_DEFAULT (0) | ||
| 9 | /* Number of event ticks between time checks. */ | ||
| 10 | #define ARENA_DECAY_NTICKS_PER_UPDATE 1000 | ||
| 11 | |||
| 12 | typedef struct arena_decay_s arena_decay_t; | ||
| 13 | typedef struct arena_s arena_t; | ||
| 14 | |||
| 15 | typedef enum { | ||
| 16 | percpu_arena_mode_names_base = 0, /* Used for options processing. */ | ||
| 17 | |||
| 18 | /* | ||
| 19 | * *_uninit are used only during bootstrapping, and must correspond | ||
| 20 | * to initialized variant plus percpu_arena_mode_enabled_base. | ||
| 21 | */ | ||
| 22 | percpu_arena_uninit = 0, | ||
| 23 | per_phycpu_arena_uninit = 1, | ||
| 24 | |||
| 25 | /* All non-disabled modes must come after percpu_arena_disabled. */ | ||
| 26 | percpu_arena_disabled = 2, | ||
| 27 | |||
| 28 | percpu_arena_mode_names_limit = 3, /* Used for options processing. */ | ||
| 29 | percpu_arena_mode_enabled_base = 3, | ||
| 30 | |||
| 31 | percpu_arena = 3, | ||
| 32 | per_phycpu_arena = 4 /* Hyper threads share arena. */ | ||
| 33 | } percpu_arena_mode_t; | ||
| 34 | |||
| 35 | #define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base) | ||
| 36 | #define PERCPU_ARENA_DEFAULT percpu_arena_disabled | ||
| 37 | |||
| 38 | /* | ||
| 39 | * When allocation_size >= oversize_threshold, use the dedicated huge arena | ||
| 40 | * (unless an arena index has been explicitly specified). 0 disables the feature. | ||
| 41 | */ | ||
| 42 | #define OVERSIZE_THRESHOLD_DEFAULT (8 << 20) | ||
| 43 | |||
| 44 | struct arena_config_s { | ||
| 45 | /* extent hooks to be used for the arena */ | ||
| 46 | extent_hooks_t *extent_hooks; | ||
| 47 | |||
| 48 | /* | ||
| 49 | * Use extent hooks for metadata (base) allocations when true. | ||
| 50 | */ | ||
| 51 | bool metadata_use_hooks; | ||
| 52 | }; | ||
| 53 | |||
| 54 | typedef struct arena_config_s arena_config_t; | ||
| 55 | |||
| 56 | extern const arena_config_t arena_config_default; | ||
| 57 | |||
| 58 | #endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/assert.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/assert.h deleted file mode 100644 index be4d45b..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/assert.h +++ /dev/null | |||
| @@ -1,56 +0,0 @@ | |||
| 1 | #include "jemalloc/internal/malloc_io.h" | ||
| 2 | #include "jemalloc/internal/util.h" | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Define a custom assert() in order to reduce the chances of deadlock during | ||
| 6 | * assertion failure. | ||
| 7 | */ | ||
| 8 | #ifndef assert | ||
| 9 | #define assert(e) do { \ | ||
| 10 | if (unlikely(config_debug && !(e))) { \ | ||
| 11 | malloc_printf( \ | ||
| 12 | "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ | ||
| 13 | __FILE__, __LINE__, #e); \ | ||
| 14 | abort(); \ | ||
| 15 | } \ | ||
| 16 | } while (0) | ||
| 17 | #endif | ||
| 18 | |||
| 19 | #ifndef not_reached | ||
| 20 | #define not_reached() do { \ | ||
| 21 | if (config_debug) { \ | ||
| 22 | malloc_printf( \ | ||
| 23 | "<jemalloc>: %s:%d: Unreachable code reached\n", \ | ||
| 24 | __FILE__, __LINE__); \ | ||
| 25 | abort(); \ | ||
| 26 | } \ | ||
| 27 | unreachable(); \ | ||
| 28 | } while (0) | ||
| 29 | #endif | ||
| 30 | |||
| 31 | #ifndef not_implemented | ||
| 32 | #define not_implemented() do { \ | ||
| 33 | if (config_debug) { \ | ||
| 34 | malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ | ||
| 35 | __FILE__, __LINE__); \ | ||
| 36 | abort(); \ | ||
| 37 | } \ | ||
| 38 | } while (0) | ||
| 39 | #endif | ||
| 40 | |||
| 41 | #ifndef assert_not_implemented | ||
| 42 | #define assert_not_implemented(e) do { \ | ||
| 43 | if (unlikely(config_debug && !(e))) { \ | ||
| 44 | not_implemented(); \ | ||
| 45 | } \ | ||
| 46 | } while (0) | ||
| 47 | #endif | ||
| 48 | |||
| 49 | /* Use to assert a particular configuration, e.g., cassert(config_debug). */ | ||
| 50 | #ifndef cassert | ||
| 51 | #define cassert(c) do { \ | ||
| 52 | if (unlikely(!(c))) { \ | ||
| 53 | not_reached(); \ | ||
| 54 | } \ | ||
| 55 | } while (0) | ||
| 56 | #endif | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic.h deleted file mode 100644 index c0f7312..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic.h +++ /dev/null | |||
| @@ -1,107 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_ATOMIC_H | ||
| 2 | #define JEMALLOC_INTERNAL_ATOMIC_H | ||
| 3 | |||
| 4 | #define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE | ||
| 5 | |||
| 6 | #define JEMALLOC_U8_ATOMICS | ||
| 7 | #if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) | ||
| 8 | # include "jemalloc/internal/atomic_gcc_atomic.h" | ||
| 9 | # if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS) | ||
| 10 | # undef JEMALLOC_U8_ATOMICS | ||
| 11 | # endif | ||
| 12 | #elif defined(JEMALLOC_GCC_SYNC_ATOMICS) | ||
| 13 | # include "jemalloc/internal/atomic_gcc_sync.h" | ||
| 14 | # if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS) | ||
| 15 | # undef JEMALLOC_U8_ATOMICS | ||
| 16 | # endif | ||
| 17 | #elif defined(_MSC_VER) | ||
| 18 | # include "jemalloc/internal/atomic_msvc.h" | ||
| 19 | #elif defined(JEMALLOC_C11_ATOMICS) | ||
| 20 | # include "jemalloc/internal/atomic_c11.h" | ||
| 21 | #else | ||
| 22 | # error "Don't have atomics implemented on this platform." | ||
| 23 | #endif | ||
| 24 | |||
| 25 | /* | ||
| 26 | * This header gives more or less a backport of C11 atomics. The user can write | ||
| 27 | * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate | ||
| 28 | * counterparts of the C11 atomic functions for type, as so: | ||
| 29 | * JEMALLOC_GENERATE_ATOMICS(int *, pi, 3); | ||
| 30 | * and then write things like: | ||
| 31 | * int *some_ptr; | ||
| 32 | * atomic_pi_t atomic_ptr_to_int; | ||
| 33 | * atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED); | ||
| 34 | * int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL); | ||
| 35 | * assert(some_ptr == prev_value); | ||
| 36 | * and expect things to work in the obvious way. | ||
| 37 | * | ||
| 38 | * Also included (with naming differences to avoid conflicts with the standard | ||
| 39 | * library): | ||
| 40 | * atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence). | ||
| 41 | * ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT). | ||
| 42 | */ | ||
| 43 | |||
| 44 | /* | ||
| 45 | * Pure convenience, so that we don't have to type "atomic_memory_order_" | ||
| 46 | * quite so often. | ||
| 47 | */ | ||
| 48 | #define ATOMIC_RELAXED atomic_memory_order_relaxed | ||
| 49 | #define ATOMIC_ACQUIRE atomic_memory_order_acquire | ||
| 50 | #define ATOMIC_RELEASE atomic_memory_order_release | ||
| 51 | #define ATOMIC_ACQ_REL atomic_memory_order_acq_rel | ||
| 52 | #define ATOMIC_SEQ_CST atomic_memory_order_seq_cst | ||
| 53 | |||
| 54 | /* | ||
| 55 | * Another convenience -- simple atomic helper functions. | ||
| 56 | */ | ||
| 57 | #define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type, \ | ||
| 58 | lg_size) \ | ||
| 59 | JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \ | ||
| 60 | ATOMIC_INLINE void \ | ||
| 61 | atomic_load_add_store_##short_type(atomic_##short_type##_t *a, \ | ||
| 62 | type inc) { \ | ||
| 63 | type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \ | ||
| 64 | type newval = oldval + inc; \ | ||
| 65 | atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \ | ||
| 66 | } \ | ||
| 67 | ATOMIC_INLINE void \ | ||
| 68 | atomic_load_sub_store_##short_type(atomic_##short_type##_t *a, \ | ||
| 69 | type inc) { \ | ||
| 70 | type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \ | ||
| 71 | type newval = oldval - inc; \ | ||
| 72 | atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \ | ||
| 73 | } | ||
| 74 | |||
| 75 | /* | ||
| 76 | * Not all platforms have 64-bit atomics. If we do, this #define exposes that | ||
| 77 | * fact. | ||
| 78 | */ | ||
| 79 | #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) | ||
| 80 | # define JEMALLOC_ATOMIC_U64 | ||
| 81 | #endif | ||
| 82 | |||
| 83 | JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR) | ||
| 84 | |||
| 85 | /* | ||
| 86 | * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only | ||
| 87 | * platform that actually needs to know the size, MSVC. | ||
| 88 | */ | ||
| 89 | JEMALLOC_GENERATE_ATOMICS(bool, b, 0) | ||
| 90 | |||
| 91 | JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT) | ||
| 92 | |||
| 93 | JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR) | ||
| 94 | |||
| 95 | JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR) | ||
| 96 | |||
| 97 | JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint8_t, u8, 0) | ||
| 98 | |||
| 99 | JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint32_t, u32, 2) | ||
| 100 | |||
| 101 | #ifdef JEMALLOC_ATOMIC_U64 | ||
| 102 | JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint64_t, u64, 3) | ||
| 103 | #endif | ||
| 104 | |||
| 105 | #undef ATOMIC_INLINE | ||
| 106 | |||
| 107 | #endif /* JEMALLOC_INTERNAL_ATOMIC_H */ | ||
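The comment block in atomic.h above sketches the intended usage of the generated wrappers. The same store/exchange sequence written directly against C11 `<stdatomic.h>` (standard API only, no jemalloc wrappers) would look roughly like this:

```c
#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

int
main(void) {
	int value = 42;
	int *some_ptr = &value;

	/* Equivalent of atomic_pi_t / atomic_store_pi / atomic_exchange_pi in
	 * the comment's example, using the standard C11 atomics directly. */
	_Atomic(int *) atomic_ptr_to_int = NULL;
	atomic_store_explicit(&atomic_ptr_to_int, some_ptr,
	    memory_order_relaxed);
	int *prev_value = atomic_exchange_explicit(&atomic_ptr_to_int, NULL,
	    memory_order_acq_rel);
	assert(some_ptr == prev_value);
	(void)prev_value;
	return 0;
}
```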
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic_c11.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic_c11.h deleted file mode 100644 index a5f9313..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic_c11.h +++ /dev/null | |||
| @@ -1,97 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H | ||
| 2 | #define JEMALLOC_INTERNAL_ATOMIC_C11_H | ||
| 3 | |||
| 4 | #include <stdatomic.h> | ||
| 5 | |||
| 6 | #define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__) | ||
| 7 | |||
| 8 | #define atomic_memory_order_t memory_order | ||
| 9 | #define atomic_memory_order_relaxed memory_order_relaxed | ||
| 10 | #define atomic_memory_order_acquire memory_order_acquire | ||
| 11 | #define atomic_memory_order_release memory_order_release | ||
| 12 | #define atomic_memory_order_acq_rel memory_order_acq_rel | ||
| 13 | #define atomic_memory_order_seq_cst memory_order_seq_cst | ||
| 14 | |||
| 15 | #define atomic_fence atomic_thread_fence | ||
| 16 | |||
| 17 | #define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ | ||
| 18 | /* unused */ lg_size) \ | ||
| 19 | typedef _Atomic(type) atomic_##short_type##_t; \ | ||
| 20 | \ | ||
| 21 | ATOMIC_INLINE type \ | ||
| 22 | atomic_load_##short_type(const atomic_##short_type##_t *a, \ | ||
| 23 | atomic_memory_order_t mo) { \ | ||
| 24 | /* \ | ||
| 25 | * A strict interpretation of the C standard prevents \ | ||
| 26 | * atomic_load from taking a const argument, but it's \ | ||
| 27 | * convenient for our purposes. This cast is a workaround. \ | ||
| 28 | */ \ | ||
| 29 | atomic_##short_type##_t* a_nonconst = \ | ||
| 30 | (atomic_##short_type##_t*)a; \ | ||
| 31 | return atomic_load_explicit(a_nonconst, mo); \ | ||
| 32 | } \ | ||
| 33 | \ | ||
| 34 | ATOMIC_INLINE void \ | ||
| 35 | atomic_store_##short_type(atomic_##short_type##_t *a, \ | ||
| 36 | type val, atomic_memory_order_t mo) { \ | ||
| 37 | atomic_store_explicit(a, val, mo); \ | ||
| 38 | } \ | ||
| 39 | \ | ||
| 40 | ATOMIC_INLINE type \ | ||
| 41 | atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 42 | atomic_memory_order_t mo) { \ | ||
| 43 | return atomic_exchange_explicit(a, val, mo); \ | ||
| 44 | } \ | ||
| 45 | \ | ||
| 46 | ATOMIC_INLINE bool \ | ||
| 47 | atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ | ||
| 48 | type *expected, type desired, atomic_memory_order_t success_mo, \ | ||
| 49 | atomic_memory_order_t failure_mo) { \ | ||
| 50 | return atomic_compare_exchange_weak_explicit(a, expected, \ | ||
| 51 | desired, success_mo, failure_mo); \ | ||
| 52 | } \ | ||
| 53 | \ | ||
| 54 | ATOMIC_INLINE bool \ | ||
| 55 | atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ | ||
| 56 | type *expected, type desired, atomic_memory_order_t success_mo, \ | ||
| 57 | atomic_memory_order_t failure_mo) { \ | ||
| 58 | return atomic_compare_exchange_strong_explicit(a, expected, \ | ||
| 59 | desired, success_mo, failure_mo); \ | ||
| 60 | } | ||
| 61 | |||
| 62 | /* | ||
| 63 | * Integral types have some special operations available that non-integral ones | ||
| 64 | * lack. | ||
| 65 | */ | ||
| 66 | #define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ | ||
| 67 | /* unused */ lg_size) \ | ||
| 68 | JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ | ||
| 69 | \ | ||
| 70 | ATOMIC_INLINE type \ | ||
| 71 | atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \ | ||
| 72 | type val, atomic_memory_order_t mo) { \ | ||
| 73 | return atomic_fetch_add_explicit(a, val, mo); \ | ||
| 74 | } \ | ||
| 75 | \ | ||
| 76 | ATOMIC_INLINE type \ | ||
| 77 | atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \ | ||
| 78 | type val, atomic_memory_order_t mo) { \ | ||
| 79 | return atomic_fetch_sub_explicit(a, val, mo); \ | ||
| 80 | } \ | ||
| 81 | ATOMIC_INLINE type \ | ||
| 82 | atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \ | ||
| 83 | type val, atomic_memory_order_t mo) { \ | ||
| 84 | return atomic_fetch_and_explicit(a, val, mo); \ | ||
| 85 | } \ | ||
| 86 | ATOMIC_INLINE type \ | ||
| 87 | atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \ | ||
| 88 | type val, atomic_memory_order_t mo) { \ | ||
| 89 | return atomic_fetch_or_explicit(a, val, mo); \ | ||
| 90 | } \ | ||
| 91 | ATOMIC_INLINE type \ | ||
| 92 | atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \ | ||
| 93 | type val, atomic_memory_order_t mo) { \ | ||
| 94 | return atomic_fetch_xor_explicit(a, val, mo); \ | ||
| 95 | } | ||
| 96 | |||
| 97 | #endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */ | ||
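The generator above stamps out a thin, typed wrapper over C11 `<stdatomic.h>`. A minimal sketch of the same pattern, instantiated for a 64-bit counter; the cut-down generator, the `static inline` stand-in for `ATOMIC_INLINE`, and the `u64` instantiation are illustrative, not jemalloc's exact build configuration:

```c
/* Illustrative sketch of the C11 generator pattern, not jemalloc's real header. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ATOMIC_INLINE static inline   /* stand-in for jemalloc's definition */
typedef memory_order atomic_memory_order_t;

#define GENERATE_ATOMICS(type, short_type)                                    \
typedef _Atomic(type) atomic_##short_type##_t;                                \
                                                                              \
ATOMIC_INLINE type                                                            \
atomic_load_##short_type(const atomic_##short_type##_t *a,                    \
    atomic_memory_order_t mo) {                                               \
	/* Cast away const, as in the original, to satisfy atomic_load. */    \
	return atomic_load_explicit((atomic_##short_type##_t *)a, mo);        \
}                                                                             \
                                                                              \
ATOMIC_INLINE void                                                            \
atomic_store_##short_type(atomic_##short_type##_t *a, type val,               \
    atomic_memory_order_t mo) {                                               \
	atomic_store_explicit(a, val, mo);                                    \
}                                                                             \
                                                                              \
ATOMIC_INLINE type                                                            \
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val,           \
    atomic_memory_order_t mo) {                                               \
	return atomic_fetch_add_explicit(a, val, mo);                         \
}

GENERATE_ATOMICS(uint64_t, u64)  /* yields atomic_u64_t plus atomic_*_u64() */

int main(void) {
	atomic_u64_t counter;
	atomic_store_u64(&counter, 0, memory_order_relaxed);
	atomic_fetch_add_u64(&counter, 5, memory_order_relaxed);
	printf("%llu\n",
	    (unsigned long long)atomic_load_u64(&counter, memory_order_relaxed));
	return 0;
}
```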
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h deleted file mode 100644 index 471515e..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h +++ /dev/null | |||
| @@ -1,129 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H | ||
| 2 | #define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/assert.h" | ||
| 5 | |||
| 6 | #define ATOMIC_INIT(...) {__VA_ARGS__} | ||
| 7 | |||
| 8 | typedef enum { | ||
| 9 | atomic_memory_order_relaxed, | ||
| 10 | atomic_memory_order_acquire, | ||
| 11 | atomic_memory_order_release, | ||
| 12 | atomic_memory_order_acq_rel, | ||
| 13 | atomic_memory_order_seq_cst | ||
| 14 | } atomic_memory_order_t; | ||
| 15 | |||
| 16 | ATOMIC_INLINE int | ||
| 17 | atomic_enum_to_builtin(atomic_memory_order_t mo) { | ||
| 18 | switch (mo) { | ||
| 19 | case atomic_memory_order_relaxed: | ||
| 20 | return __ATOMIC_RELAXED; | ||
| 21 | case atomic_memory_order_acquire: | ||
| 22 | return __ATOMIC_ACQUIRE; | ||
| 23 | case atomic_memory_order_release: | ||
| 24 | return __ATOMIC_RELEASE; | ||
| 25 | case atomic_memory_order_acq_rel: | ||
| 26 | return __ATOMIC_ACQ_REL; | ||
| 27 | case atomic_memory_order_seq_cst: | ||
| 28 | return __ATOMIC_SEQ_CST; | ||
| 29 | } | ||
| 30 | /* Can't happen; the switch is exhaustive. */ | ||
| 31 | not_reached(); | ||
| 32 | } | ||
| 33 | |||
| 34 | ATOMIC_INLINE void | ||
| 35 | atomic_fence(atomic_memory_order_t mo) { | ||
| 36 | __atomic_thread_fence(atomic_enum_to_builtin(mo)); | ||
| 37 | } | ||
| 38 | |||
| 39 | #define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ | ||
| 40 | /* unused */ lg_size) \ | ||
| 41 | typedef struct { \ | ||
| 42 | type repr; \ | ||
| 43 | } atomic_##short_type##_t; \ | ||
| 44 | \ | ||
| 45 | ATOMIC_INLINE type \ | ||
| 46 | atomic_load_##short_type(const atomic_##short_type##_t *a, \ | ||
| 47 | atomic_memory_order_t mo) { \ | ||
| 48 | type result; \ | ||
| 49 | __atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \ | ||
| 50 | return result; \ | ||
| 51 | } \ | ||
| 52 | \ | ||
| 53 | ATOMIC_INLINE void \ | ||
| 54 | atomic_store_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 55 | atomic_memory_order_t mo) { \ | ||
| 56 | __atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \ | ||
| 57 | } \ | ||
| 58 | \ | ||
| 59 | ATOMIC_INLINE type \ | ||
| 60 | atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 61 | atomic_memory_order_t mo) { \ | ||
| 62 | type result; \ | ||
| 63 | __atomic_exchange(&a->repr, &val, &result, \ | ||
| 64 | atomic_enum_to_builtin(mo)); \ | ||
| 65 | return result; \ | ||
| 66 | } \ | ||
| 67 | \ | ||
| 68 | ATOMIC_INLINE bool \ | ||
| 69 | atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ | ||
| 70 | UNUSED type *expected, type desired, \ | ||
| 71 | atomic_memory_order_t success_mo, \ | ||
| 72 | atomic_memory_order_t failure_mo) { \ | ||
| 73 | return __atomic_compare_exchange(&a->repr, expected, &desired, \ | ||
| 74 | true, atomic_enum_to_builtin(success_mo), \ | ||
| 75 | atomic_enum_to_builtin(failure_mo)); \ | ||
| 76 | } \ | ||
| 77 | \ | ||
| 78 | ATOMIC_INLINE bool \ | ||
| 79 | atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ | ||
| 80 | UNUSED type *expected, type desired, \ | ||
| 81 | atomic_memory_order_t success_mo, \ | ||
| 82 | atomic_memory_order_t failure_mo) { \ | ||
| 83 | return __atomic_compare_exchange(&a->repr, expected, &desired, \ | ||
| 84 | false, \ | ||
| 85 | atomic_enum_to_builtin(success_mo), \ | ||
| 86 | atomic_enum_to_builtin(failure_mo)); \ | ||
| 87 | } | ||
| 88 | |||
| 89 | |||
| 90 | #define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ | ||
| 91 | /* unused */ lg_size) \ | ||
| 92 | JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ | ||
| 93 | \ | ||
| 94 | ATOMIC_INLINE type \ | ||
| 95 | atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 96 | atomic_memory_order_t mo) { \ | ||
| 97 | return __atomic_fetch_add(&a->repr, val, \ | ||
| 98 | atomic_enum_to_builtin(mo)); \ | ||
| 99 | } \ | ||
| 100 | \ | ||
| 101 | ATOMIC_INLINE type \ | ||
| 102 | atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 103 | atomic_memory_order_t mo) { \ | ||
| 104 | return __atomic_fetch_sub(&a->repr, val, \ | ||
| 105 | atomic_enum_to_builtin(mo)); \ | ||
| 106 | } \ | ||
| 107 | \ | ||
| 108 | ATOMIC_INLINE type \ | ||
| 109 | atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 110 | atomic_memory_order_t mo) { \ | ||
| 111 | return __atomic_fetch_and(&a->repr, val, \ | ||
| 112 | atomic_enum_to_builtin(mo)); \ | ||
| 113 | } \ | ||
| 114 | \ | ||
| 115 | ATOMIC_INLINE type \ | ||
| 116 | atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 117 | atomic_memory_order_t mo) { \ | ||
| 118 | return __atomic_fetch_or(&a->repr, val, \ | ||
| 119 | atomic_enum_to_builtin(mo)); \ | ||
| 120 | } \ | ||
| 121 | \ | ||
| 122 | ATOMIC_INLINE type \ | ||
| 123 | atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 124 | atomic_memory_order_t mo) { \ | ||
| 125 | return __atomic_fetch_xor(&a->repr, val, \ | ||
| 126 | atomic_enum_to_builtin(mo)); \ | ||
| 127 | } | ||
| 128 | |||
| 129 | #endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */ | ||
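The `__atomic`-builtin flavor differs from the C11 one in two ways: the value is wrapped in a single-member struct, so callers can never touch it non-atomically by accident, and the portable memory-order enum is translated to `__ATOMIC_*` constants at each call. A small standalone sketch of that shape for one type (GCC/Clang only; the `*_sketch` names are made up for illustration):

```c
/* Sketch of the __atomic-builtin shape for one type (GCC/Clang only). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum {
	mo_relaxed, mo_acquire, mo_release, mo_acq_rel, mo_seq_cst
} mo_sketch_t;

/* Translate the portable enum to the compiler's __ATOMIC_* constants. */
static inline int
mo_to_builtin(mo_sketch_t mo) {
	switch (mo) {
	case mo_relaxed: return __ATOMIC_RELAXED;
	case mo_acquire: return __ATOMIC_ACQUIRE;
	case mo_release: return __ATOMIC_RELEASE;
	case mo_acq_rel: return __ATOMIC_ACQ_REL;
	default:         return __ATOMIC_SEQ_CST;
	}
}

/* The value lives inside a struct, so callers can only go through the API. */
typedef struct { uint32_t repr; } atomic_u32_sketch_t;

static inline uint32_t
atomic_load_u32_sketch(const atomic_u32_sketch_t *a, mo_sketch_t mo) {
	uint32_t result;
	/* Cast away const, as the C11 variant does, purely for convenience. */
	__atomic_load(&((atomic_u32_sketch_t *)a)->repr, &result,
	    mo_to_builtin(mo));
	return result;
}

static inline bool
atomic_cas_weak_u32_sketch(atomic_u32_sketch_t *a, uint32_t *expected,
    uint32_t desired, mo_sketch_t success_mo, mo_sketch_t failure_mo) {
	return __atomic_compare_exchange(&a->repr, expected, &desired,
	    true /* weak */, mo_to_builtin(success_mo),
	    mo_to_builtin(failure_mo));
}

int main(void) {
	atomic_u32_sketch_t a = {41};
	uint32_t expected = 41;
	/* A weak CAS may fail spuriously, so callers retry in a loop. */
	while (!atomic_cas_weak_u32_sketch(&a, &expected, 42, mo_acq_rel,
	    mo_acquire)) {
	}
	printf("%u\n", atomic_load_u32_sketch(&a, mo_acquire));
	return 0;
}
```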
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h deleted file mode 100644 index e02b7cb..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h +++ /dev/null | |||
| @@ -1,195 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H | ||
| 2 | #define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H | ||
| 3 | |||
| 4 | #define ATOMIC_INIT(...) {__VA_ARGS__} | ||
| 5 | |||
| 6 | typedef enum { | ||
| 7 | atomic_memory_order_relaxed, | ||
| 8 | atomic_memory_order_acquire, | ||
| 9 | atomic_memory_order_release, | ||
| 10 | atomic_memory_order_acq_rel, | ||
| 11 | atomic_memory_order_seq_cst | ||
| 12 | } atomic_memory_order_t; | ||
| 13 | |||
| 14 | ATOMIC_INLINE void | ||
| 15 | atomic_fence(atomic_memory_order_t mo) { | ||
| 16 | /* Easy cases first: no barrier, and full barrier. */ | ||
| 17 | if (mo == atomic_memory_order_relaxed) { | ||
| 18 | asm volatile("" ::: "memory"); | ||
| 19 | return; | ||
| 20 | } | ||
| 21 | if (mo == atomic_memory_order_seq_cst) { | ||
| 22 | asm volatile("" ::: "memory"); | ||
| 23 | __sync_synchronize(); | ||
| 24 | asm volatile("" ::: "memory"); | ||
| 25 | return; | ||
| 26 | } | ||
| 27 | asm volatile("" ::: "memory"); | ||
| 28 | # if defined(__i386__) || defined(__x86_64__) | ||
| 29 | /* This is implicit on x86. */ | ||
| 30 | # elif defined(__ppc64__) | ||
| 31 | asm volatile("lwsync"); | ||
| 32 | # elif defined(__ppc__) | ||
| 33 | asm volatile("sync"); | ||
| 34 | # elif defined(__sparc__) && defined(__arch64__) | ||
| 35 | if (mo == atomic_memory_order_acquire) { | ||
| 36 | asm volatile("membar #LoadLoad | #LoadStore"); | ||
| 37 | } else if (mo == atomic_memory_order_release) { | ||
| 38 | asm volatile("membar #LoadStore | #StoreStore"); | ||
| 39 | } else { | ||
| 40 | asm volatile("membar #LoadLoad | #LoadStore | #StoreStore"); | ||
| 41 | } | ||
| 42 | # else | ||
| 43 | __sync_synchronize(); | ||
| 44 | # endif | ||
| 45 | asm volatile("" ::: "memory"); | ||
| 46 | } | ||
| 47 | |||
| 48 | /* | ||
| 49 | * A correct implementation of seq_cst loads and stores on weakly ordered | ||
| 50 | * architectures could do either of the following: | ||
| 51 | * 1. store() is weak-fence -> store -> strong fence, load() is load -> | ||
| 52 | * strong-fence. | ||
| 53 | * 2. store() is strong-fence -> store, load() is strong-fence -> load -> | ||
| 54 | * weak-fence. | ||
| 55 | * The tricky thing is, load() and store() above can be the load or store | ||
| 56 | * portions of a gcc __sync builtin, so we have to follow GCC's lead, which | ||
| 57 | * means going with strategy 2. | ||
| 58 | * On strongly ordered architectures, the natural strategy is to stick a strong | ||
| 59 | * fence after seq_cst stores, and have naked loads. So we want the strong | ||
| 60 | * fences in different places on different architectures. | ||
| 61 | * atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to | ||
| 62 | * accomplish this. | ||
| 63 | */ | ||
| 64 | |||
| 65 | ATOMIC_INLINE void | ||
| 66 | atomic_pre_sc_load_fence() { | ||
| 67 | # if defined(__i386__) || defined(__x86_64__) || \ | ||
| 68 | (defined(__sparc__) && defined(__arch64__)) | ||
| 69 | atomic_fence(atomic_memory_order_relaxed); | ||
| 70 | # else | ||
| 71 | atomic_fence(atomic_memory_order_seq_cst); | ||
| 72 | # endif | ||
| 73 | } | ||
| 74 | |||
| 75 | ATOMIC_INLINE void | ||
| 76 | atomic_post_sc_store_fence() { | ||
| 77 | # if defined(__i386__) || defined(__x86_64__) || \ | ||
| 78 | (defined(__sparc__) && defined(__arch64__)) | ||
| 79 | atomic_fence(atomic_memory_order_seq_cst); | ||
| 80 | # else | ||
| 81 | atomic_fence(atomic_memory_order_relaxed); | ||
| 82 | # endif | ||
| 83 | |||
| 84 | } | ||
| 85 | |||
| 86 | #define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ | ||
| 87 | /* unused */ lg_size) \ | ||
| 88 | typedef struct { \ | ||
| 89 | type volatile repr; \ | ||
| 90 | } atomic_##short_type##_t; \ | ||
| 91 | \ | ||
| 92 | ATOMIC_INLINE type \ | ||
| 93 | atomic_load_##short_type(const atomic_##short_type##_t *a, \ | ||
| 94 | atomic_memory_order_t mo) { \ | ||
| 95 | if (mo == atomic_memory_order_seq_cst) { \ | ||
| 96 | atomic_pre_sc_load_fence(); \ | ||
| 97 | } \ | ||
| 98 | type result = a->repr; \ | ||
| 99 | if (mo != atomic_memory_order_relaxed) { \ | ||
| 100 | atomic_fence(atomic_memory_order_acquire); \ | ||
| 101 | } \ | ||
| 102 | return result; \ | ||
| 103 | } \ | ||
| 104 | \ | ||
| 105 | ATOMIC_INLINE void \ | ||
| 106 | atomic_store_##short_type(atomic_##short_type##_t *a, \ | ||
| 107 | type val, atomic_memory_order_t mo) { \ | ||
| 108 | if (mo != atomic_memory_order_relaxed) { \ | ||
| 109 | atomic_fence(atomic_memory_order_release); \ | ||
| 110 | } \ | ||
| 111 | a->repr = val; \ | ||
| 112 | if (mo == atomic_memory_order_seq_cst) { \ | ||
| 113 | atomic_post_sc_store_fence(); \ | ||
| 114 | } \ | ||
| 115 | } \ | ||
| 116 | \ | ||
| 117 | ATOMIC_INLINE type \ | ||
| 118 | atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 119 | atomic_memory_order_t mo) { \ | ||
| 120 | /* \ | ||
| 121 | * Because of FreeBSD, we care about gcc 4.2, which doesn't have\ | ||
| 122 | * an atomic exchange builtin. We fake it with a CAS loop. \ | ||
| 123 | */ \ | ||
| 124 | while (true) { \ | ||
| 125 | type old = a->repr; \ | ||
| 126 | if (__sync_bool_compare_and_swap(&a->repr, old, val)) { \ | ||
| 127 | return old; \ | ||
| 128 | } \ | ||
| 129 | } \ | ||
| 130 | } \ | ||
| 131 | \ | ||
| 132 | ATOMIC_INLINE bool \ | ||
| 133 | atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ | ||
| 134 | type *expected, type desired, \ | ||
| 135 | atomic_memory_order_t success_mo, \ | ||
| 136 | atomic_memory_order_t failure_mo) { \ | ||
| 137 | type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ | ||
| 138 | desired); \ | ||
| 139 | if (prev == *expected) { \ | ||
| 140 | return true; \ | ||
| 141 | } else { \ | ||
| 142 | *expected = prev; \ | ||
| 143 | return false; \ | ||
| 144 | } \ | ||
| 145 | } \ | ||
| 146 | ATOMIC_INLINE bool \ | ||
| 147 | atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ | ||
| 148 | type *expected, type desired, \ | ||
| 149 | atomic_memory_order_t success_mo, \ | ||
| 150 | atomic_memory_order_t failure_mo) { \ | ||
| 151 | type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ | ||
| 152 | desired); \ | ||
| 153 | if (prev == *expected) { \ | ||
| 154 | return true; \ | ||
| 155 | } else { \ | ||
| 156 | *expected = prev; \ | ||
| 157 | return false; \ | ||
| 158 | } \ | ||
| 159 | } | ||
| 160 | |||
| 161 | #define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ | ||
| 162 | /* unused */ lg_size) \ | ||
| 163 | JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ | ||
| 164 | \ | ||
| 165 | ATOMIC_INLINE type \ | ||
| 166 | atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 167 | atomic_memory_order_t mo) { \ | ||
| 168 | return __sync_fetch_and_add(&a->repr, val); \ | ||
| 169 | } \ | ||
| 170 | \ | ||
| 171 | ATOMIC_INLINE type \ | ||
| 172 | atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 173 | atomic_memory_order_t mo) { \ | ||
| 174 | return __sync_fetch_and_sub(&a->repr, val); \ | ||
| 175 | } \ | ||
| 176 | \ | ||
| 177 | ATOMIC_INLINE type \ | ||
| 178 | atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 179 | atomic_memory_order_t mo) { \ | ||
| 180 | return __sync_fetch_and_and(&a->repr, val); \ | ||
| 181 | } \ | ||
| 182 | \ | ||
| 183 | ATOMIC_INLINE type \ | ||
| 184 | atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 185 | atomic_memory_order_t mo) { \ | ||
| 186 | return __sync_fetch_and_or(&a->repr, val); \ | ||
| 187 | } \ | ||
| 188 | \ | ||
| 189 | ATOMIC_INLINE type \ | ||
| 190 | atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 191 | atomic_memory_order_t mo) { \ | ||
| 192 | return __sync_fetch_and_xor(&a->repr, val); \ | ||
| 193 | } | ||
| 194 | |||
| 195 | #endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */ | ||
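The `__sync` flavor predates per-operation memory orders, so ordering is handled with explicit fences and, as the comment in the header notes, exchange has to be emulated with a CAS loop because gcc 4.2 has no exchange builtin. A standalone sketch of those two emulations (the `*_sketch` names are invented; requires a compiler with the legacy `__sync` builtins):

```c
/* Standalone sketch of the __sync-builtin emulation (GCC/Clang legacy API). */
#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned long volatile repr; } atomic_ulong_sketch_t;

/* No __sync exchange exists, so emulate it with a CAS loop, as the header does. */
static inline unsigned long
atomic_exchange_sketch(atomic_ulong_sketch_t *a, unsigned long val) {
	while (true) {
		unsigned long old = a->repr;
		if (__sync_bool_compare_and_swap(&a->repr, old, val)) {
			return old;
		}
	}
}

/* compare_exchange built on the value-returning CAS. */
static inline bool
atomic_compare_exchange_sketch(atomic_ulong_sketch_t *a,
    unsigned long *expected, unsigned long desired) {
	unsigned long prev = __sync_val_compare_and_swap(&a->repr, *expected,
	    desired);
	if (prev == *expected) {
		return true;
	}
	*expected = prev;  /* Report the value actually observed. */
	return false;
}

int main(void) {
	atomic_ulong_sketch_t a = {7};
	unsigned long old = atomic_exchange_sketch(&a, 8);
	unsigned long expected = 8;
	bool ok = atomic_compare_exchange_sketch(&a, &expected, 9);
	printf("old=%lu ok=%d now=%lu\n", old, (int)ok, a.repr);
	return 0;
}
```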
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic_msvc.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic_msvc.h deleted file mode 100644 index 67057ce..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/atomic_msvc.h +++ /dev/null | |||
| @@ -1,158 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H | ||
| 2 | #define JEMALLOC_INTERNAL_ATOMIC_MSVC_H | ||
| 3 | |||
| 4 | #define ATOMIC_INIT(...) {__VA_ARGS__} | ||
| 5 | |||
| 6 | typedef enum { | ||
| 7 | atomic_memory_order_relaxed, | ||
| 8 | atomic_memory_order_acquire, | ||
| 9 | atomic_memory_order_release, | ||
| 10 | atomic_memory_order_acq_rel, | ||
| 11 | atomic_memory_order_seq_cst | ||
| 12 | } atomic_memory_order_t; | ||
| 13 | |||
| 14 | typedef char atomic_repr_0_t; | ||
| 15 | typedef short atomic_repr_1_t; | ||
| 16 | typedef long atomic_repr_2_t; | ||
| 17 | typedef __int64 atomic_repr_3_t; | ||
| 18 | |||
| 19 | ATOMIC_INLINE void | ||
| 20 | atomic_fence(atomic_memory_order_t mo) { | ||
| 21 | _ReadWriteBarrier(); | ||
| 22 | # if defined(_M_ARM) || defined(_M_ARM64) | ||
| 23 | /* ARM needs a barrier for everything but relaxed. */ | ||
| 24 | if (mo != atomic_memory_order_relaxed) { | ||
| 25 | MemoryBarrier(); | ||
| 26 | } | ||
| 27 | # elif defined(_M_IX86) || defined (_M_X64) | ||
| 28 | /* x86 needs a barrier only for seq_cst. */ | ||
| 29 | if (mo == atomic_memory_order_seq_cst) { | ||
| 30 | MemoryBarrier(); | ||
| 31 | } | ||
| 32 | # else | ||
| 33 | # error "Don't know how to create atomics for this platform for MSVC." | ||
| 34 | # endif | ||
| 35 | _ReadWriteBarrier(); | ||
| 36 | } | ||
| 37 | |||
| 38 | #define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t | ||
| 39 | |||
| 40 | #define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b) | ||
| 41 | #define ATOMIC_RAW_CONCAT(a, b) a ## b | ||
| 42 | |||
| 43 | #define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \ | ||
| 44 | base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size)) | ||
| 45 | |||
| 46 | #define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \ | ||
| 47 | ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size) | ||
| 48 | |||
| 49 | #define ATOMIC_INTERLOCKED_SUFFIX_0 8 | ||
| 50 | #define ATOMIC_INTERLOCKED_SUFFIX_1 16 | ||
| 51 | #define ATOMIC_INTERLOCKED_SUFFIX_2 | ||
| 52 | #define ATOMIC_INTERLOCKED_SUFFIX_3 64 | ||
| 53 | |||
| 54 | #define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \ | ||
| 55 | typedef struct { \ | ||
| 56 | ATOMIC_INTERLOCKED_REPR(lg_size) repr; \ | ||
| 57 | } atomic_##short_type##_t; \ | ||
| 58 | \ | ||
| 59 | ATOMIC_INLINE type \ | ||
| 60 | atomic_load_##short_type(const atomic_##short_type##_t *a, \ | ||
| 61 | atomic_memory_order_t mo) { \ | ||
| 62 | ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \ | ||
| 63 | if (mo != atomic_memory_order_relaxed) { \ | ||
| 64 | atomic_fence(atomic_memory_order_acquire); \ | ||
| 65 | } \ | ||
| 66 | return (type) ret; \ | ||
| 67 | } \ | ||
| 68 | \ | ||
| 69 | ATOMIC_INLINE void \ | ||
| 70 | atomic_store_##short_type(atomic_##short_type##_t *a, \ | ||
| 71 | type val, atomic_memory_order_t mo) { \ | ||
| 72 | if (mo != atomic_memory_order_relaxed) { \ | ||
| 73 | atomic_fence(atomic_memory_order_release); \ | ||
| 74 | } \ | ||
| 75 | a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \ | ||
| 76 | if (mo == atomic_memory_order_seq_cst) { \ | ||
| 77 | atomic_fence(atomic_memory_order_seq_cst); \ | ||
| 78 | } \ | ||
| 79 | } \ | ||
| 80 | \ | ||
| 81 | ATOMIC_INLINE type \ | ||
| 82 | atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ | ||
| 83 | atomic_memory_order_t mo) { \ | ||
| 84 | return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \ | ||
| 85 | lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ | ||
| 86 | } \ | ||
| 87 | \ | ||
| 88 | ATOMIC_INLINE bool \ | ||
| 89 | atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ | ||
| 90 | type *expected, type desired, atomic_memory_order_t success_mo, \ | ||
| 91 | atomic_memory_order_t failure_mo) { \ | ||
| 92 | ATOMIC_INTERLOCKED_REPR(lg_size) e = \ | ||
| 93 | (ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \ | ||
| 94 | ATOMIC_INTERLOCKED_REPR(lg_size) d = \ | ||
| 95 | (ATOMIC_INTERLOCKED_REPR(lg_size))desired; \ | ||
| 96 | ATOMIC_INTERLOCKED_REPR(lg_size) old = \ | ||
| 97 | ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \ | ||
| 98 | lg_size)(&a->repr, d, e); \ | ||
| 99 | if (old == e) { \ | ||
| 100 | return true; \ | ||
| 101 | } else { \ | ||
| 102 | *expected = (type)old; \ | ||
| 103 | return false; \ | ||
| 104 | } \ | ||
| 105 | } \ | ||
| 106 | \ | ||
| 107 | ATOMIC_INLINE bool \ | ||
| 108 | atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ | ||
| 109 | type *expected, type desired, atomic_memory_order_t success_mo, \ | ||
| 110 | atomic_memory_order_t failure_mo) { \ | ||
| 111 | /* We implement the weak version with strong semantics. */ \ | ||
| 112 | return atomic_compare_exchange_weak_##short_type(a, expected, \ | ||
| 113 | desired, success_mo, failure_mo); \ | ||
| 114 | } | ||
| 115 | |||
| 116 | |||
| 117 | #define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \ | ||
| 118 | JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \ | ||
| 119 | \ | ||
| 120 | ATOMIC_INLINE type \ | ||
| 121 | atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \ | ||
| 122 | type val, atomic_memory_order_t mo) { \ | ||
| 123 | return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \ | ||
| 124 | lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ | ||
| 125 | } \ | ||
| 126 | \ | ||
| 127 | ATOMIC_INLINE type \ | ||
| 128 | atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \ | ||
| 129 | type val, atomic_memory_order_t mo) { \ | ||
| 130 | /* \ | ||
| 131 | * MSVC warns on negation of unsigned operands, but for us it \ | ||
| 132 | * gives exactly the right semantics (MAX_TYPE + 1 - operand). \ | ||
| 133 | */ \ | ||
| 134 | __pragma(warning(push)) \ | ||
| 135 | __pragma(warning(disable: 4146)) \ | ||
| 136 | return atomic_fetch_add_##short_type(a, -val, mo); \ | ||
| 137 | __pragma(warning(pop)) \ | ||
| 138 | } \ | ||
| 139 | ATOMIC_INLINE type \ | ||
| 140 | atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \ | ||
| 141 | type val, atomic_memory_order_t mo) { \ | ||
| 142 | return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \ | ||
| 143 | &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ | ||
| 144 | } \ | ||
| 145 | ATOMIC_INLINE type \ | ||
| 146 | atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \ | ||
| 147 | type val, atomic_memory_order_t mo) { \ | ||
| 148 | return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \ | ||
| 149 | &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ | ||
| 150 | } \ | ||
| 151 | ATOMIC_INLINE type \ | ||
| 152 | atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \ | ||
| 153 | type val, atomic_memory_order_t mo) { \ | ||
| 154 | return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \ | ||
| 155 | &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ | ||
| 156 | } | ||
| 157 | |||
| 158 | #endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */ | ||
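The MSVC flavor selects the right `_Interlocked*` intrinsic by token-pasting a width suffix derived from `lg_size`: 8 for 1-byte, 16 for 2-byte, nothing for 4-byte (`long`), and 64 for 8-byte operands. The sketch below reproduces only that preprocessor machinery and prints the names it resolves to, so it can be checked with any C compiler rather than MSVC itself:

```c
/* Preprocessor-only sketch: how ATOMIC_INTERLOCKED_NAME picks an intrinsic. */
#include <stdio.h>

#define ATOMIC_RAW_CONCAT(a, b) a ## b
#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b)

#define ATOMIC_INTERLOCKED_SUFFIX_0 8    /* 1-byte operands */
#define ATOMIC_INTERLOCKED_SUFFIX_1 16   /* 2-byte operands */
#define ATOMIC_INTERLOCKED_SUFFIX_2      /* 4-byte operands: bare name */
#define ATOMIC_INTERLOCKED_SUFFIX_3 64   /* 8-byte operands */

#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \
	ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size)
#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) \
	ATOMIC_CONCAT(base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size))

/* Two-level stringification so the macros expand before being quoted. */
#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)

int main(void) {
	puts(STR(ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 0)));
	puts(STR(ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 2)));
	puts(STR(ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 3)));
	/* Prints: _InterlockedExchange8, _InterlockedExchange,
	 * _InterlockedExchange64. */
	return 0;
}
```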
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/background_thread_externs.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/background_thread_externs.h deleted file mode 100644 index 6ae3c8d..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/background_thread_externs.h +++ /dev/null | |||
| @@ -1,33 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H | ||
| 2 | #define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H | ||
| 3 | |||
| 4 | extern bool opt_background_thread; | ||
| 5 | extern size_t opt_max_background_threads; | ||
| 6 | extern malloc_mutex_t background_thread_lock; | ||
| 7 | extern atomic_b_t background_thread_enabled_state; | ||
| 8 | extern size_t n_background_threads; | ||
| 9 | extern size_t max_background_threads; | ||
| 10 | extern background_thread_info_t *background_thread_info; | ||
| 11 | |||
| 12 | bool background_thread_create(tsd_t *tsd, unsigned arena_ind); | ||
| 13 | bool background_threads_enable(tsd_t *tsd); | ||
| 14 | bool background_threads_disable(tsd_t *tsd); | ||
| 15 | bool background_thread_is_started(background_thread_info_t* info); | ||
| 16 | void background_thread_wakeup_early(background_thread_info_t *info, | ||
| 17 | nstime_t *remaining_sleep); | ||
| 18 | void background_thread_prefork0(tsdn_t *tsdn); | ||
| 19 | void background_thread_prefork1(tsdn_t *tsdn); | ||
| 20 | void background_thread_postfork_parent(tsdn_t *tsdn); | ||
| 21 | void background_thread_postfork_child(tsdn_t *tsdn); | ||
| 22 | bool background_thread_stats_read(tsdn_t *tsdn, | ||
| 23 | background_thread_stats_t *stats); | ||
| 24 | void background_thread_ctl_init(tsdn_t *tsdn); | ||
| 25 | |||
| 26 | #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER | ||
| 27 | extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *, | ||
| 28 | void *(*)(void *), void *__restrict); | ||
| 29 | #endif | ||
| 30 | bool background_thread_boot0(void); | ||
| 31 | bool background_thread_boot1(tsdn_t *tsdn, base_t *base); | ||
| 32 | |||
| 33 | #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/background_thread_inlines.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/background_thread_inlines.h deleted file mode 100644 index 92c5feb..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/background_thread_inlines.h +++ /dev/null | |||
| @@ -1,48 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H | ||
| 2 | #define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H | ||
| 3 | |||
| 4 | JEMALLOC_ALWAYS_INLINE bool | ||
| 5 | background_thread_enabled(void) { | ||
| 6 | return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED); | ||
| 7 | } | ||
| 8 | |||
| 9 | JEMALLOC_ALWAYS_INLINE void | ||
| 10 | background_thread_enabled_set(tsdn_t *tsdn, bool state) { | ||
| 11 | malloc_mutex_assert_owner(tsdn, &background_thread_lock); | ||
| 12 | atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED); | ||
| 13 | } | ||
| 14 | |||
| 15 | JEMALLOC_ALWAYS_INLINE background_thread_info_t * | ||
| 16 | arena_background_thread_info_get(arena_t *arena) { | ||
| 17 | unsigned arena_ind = arena_ind_get(arena); | ||
| 18 | return &background_thread_info[arena_ind % max_background_threads]; | ||
| 19 | } | ||
| 20 | |||
| 21 | JEMALLOC_ALWAYS_INLINE background_thread_info_t * | ||
| 22 | background_thread_info_get(size_t ind) { | ||
| 23 | return &background_thread_info[ind % max_background_threads]; | ||
| 24 | } | ||
| 25 | |||
| 26 | JEMALLOC_ALWAYS_INLINE uint64_t | ||
| 27 | background_thread_wakeup_time_get(background_thread_info_t *info) { | ||
| 28 | uint64_t next_wakeup = nstime_ns(&info->next_wakeup); | ||
| 29 | assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) == | ||
| 30 | (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP)); | ||
| 31 | return next_wakeup; | ||
| 32 | } | ||
| 33 | |||
| 34 | JEMALLOC_ALWAYS_INLINE void | ||
| 35 | background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info, | ||
| 36 | uint64_t wakeup_time) { | ||
| 37 | malloc_mutex_assert_owner(tsdn, &info->mtx); | ||
| 38 | atomic_store_b(&info->indefinite_sleep, | ||
| 39 | wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE); | ||
| 40 | nstime_init(&info->next_wakeup, wakeup_time); | ||
| 41 | } | ||
| 42 | |||
| 43 | JEMALLOC_ALWAYS_INLINE bool | ||
| 44 | background_thread_indefinite_sleep(background_thread_info_t *info) { | ||
| 45 | return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE); | ||
| 46 | } | ||
| 47 | |||
| 48 | #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */ | ||
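Note how arenas are mapped onto a bounded pool of background threads purely by index: `arena_ind % max_background_threads`. A toy illustration of that sharding (the thread limit and arena count here are made-up values):

```c
/* Toy illustration of the arena -> background-thread sharding (values made up). */
#include <stdio.h>

int main(void) {
	unsigned max_background_threads = 4;   /* hypothetical limit */
	unsigned narenas = 10;                 /* hypothetical arena count */
	for (unsigned arena_ind = 0; arena_ind < narenas; arena_ind++) {
		/* Same modulo mapping as arena_background_thread_info_get(). */
		unsigned info_ind = arena_ind % max_background_threads;
		printf("arena %u -> background_thread_info[%u]\n", arena_ind,
		    info_ind);
	}
	return 0;
}
```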
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/background_thread_structs.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/background_thread_structs.h deleted file mode 100644 index 83a9198..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/background_thread_structs.h +++ /dev/null | |||
| @@ -1,66 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H | ||
| 2 | #define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H | ||
| 3 | |||
| 4 | /* This file really combines "structs" and "types", but only transitionally. */ | ||
| 5 | |||
| 6 | #if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK) | ||
| 7 | # define JEMALLOC_PTHREAD_CREATE_WRAPPER | ||
| 8 | #endif | ||
| 9 | |||
| 10 | #define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX | ||
| 11 | #define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT | ||
| 12 | #define DEFAULT_NUM_BACKGROUND_THREAD 4 | ||
| 13 | |||
| 14 | /* | ||
| 15 | * These exist only as a transitional state. Eventually, deferral should be | ||
| 16 | * part of the PAI, and each implementation can indicate wait times with more | ||
| 17 | * specificity. | ||
| 18 | */ | ||
| 19 | #define BACKGROUND_THREAD_HPA_INTERVAL_MAX_UNINITIALIZED (-2) | ||
| 20 | #define BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED 5000 | ||
| 21 | |||
| 22 | #define BACKGROUND_THREAD_DEFERRED_MIN UINT64_C(0) | ||
| 23 | #define BACKGROUND_THREAD_DEFERRED_MAX UINT64_MAX | ||
| 24 | |||
| 25 | typedef enum { | ||
| 26 | background_thread_stopped, | ||
| 27 | background_thread_started, | ||
| 28 | /* Thread waits on the global lock when paused (for arena_reset). */ | ||
| 29 | background_thread_paused, | ||
| 30 | } background_thread_state_t; | ||
| 31 | |||
| 32 | struct background_thread_info_s { | ||
| 33 | #ifdef JEMALLOC_BACKGROUND_THREAD | ||
| 34 | /* Background thread is pthread specific. */ | ||
| 35 | pthread_t thread; | ||
| 36 | pthread_cond_t cond; | ||
| 37 | #endif | ||
| 38 | malloc_mutex_t mtx; | ||
| 39 | background_thread_state_t state; | ||
| 40 | /* When true, it means no wakeup scheduled. */ | ||
| 41 | atomic_b_t indefinite_sleep; | ||
| 42 | /* Next scheduled wakeup time (absolute time in ns). */ | ||
| 43 | nstime_t next_wakeup; | ||
| 44 | /* | ||
| 45 | * Since the last background thread run, newly added number of pages | ||
| 46 | * that need to be purged by the next wakeup. This is adjusted on | ||
| 47 | * epoch advance, and is used to determine whether we should signal the | ||
| 48 | * background thread to wake up earlier. | ||
| 49 | */ | ||
| 50 | size_t npages_to_purge_new; | ||
| 51 | /* Stats: total number of runs since started. */ | ||
| 52 | uint64_t tot_n_runs; | ||
| 53 | /* Stats: total sleep time since started. */ | ||
| 54 | nstime_t tot_sleep_time; | ||
| 55 | }; | ||
| 56 | typedef struct background_thread_info_s background_thread_info_t; | ||
| 57 | |||
| 58 | struct background_thread_stats_s { | ||
| 59 | size_t num_threads; | ||
| 60 | uint64_t num_runs; | ||
| 61 | nstime_t run_interval; | ||
| 62 | mutex_prof_data_t max_counter_per_bg_thd; | ||
| 63 | }; | ||
| 64 | typedef struct background_thread_stats_s background_thread_stats_t; | ||
| 65 | |||
| 66 | #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/base.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/base.h deleted file mode 100644 index 9b2c9fb..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/base.h +++ /dev/null | |||
| @@ -1,110 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_BASE_H | ||
| 2 | #define JEMALLOC_INTERNAL_BASE_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/edata.h" | ||
| 5 | #include "jemalloc/internal/ehooks.h" | ||
| 6 | #include "jemalloc/internal/mutex.h" | ||
| 7 | |||
| 8 | enum metadata_thp_mode_e { | ||
| 9 | metadata_thp_disabled = 0, | ||
| 10 | /* | ||
| 11 | * Lazily enable hugepages for metadata. To avoid high RSS caused by THP | ||
| 12 | * combined with a low-usage arena (i.e. THP becoming a significant | ||
| 13 | * fraction of resident memory), the "auto" option only starts using THP | ||
| 14 | * after a base allocator has used up its first THP region. Starting from | ||
| 15 | * the second hugepage (in a single arena), "auto" behaves the same as | ||
| 16 | * "always", i.e. madvise hugepage right away. | ||
| 17 | */ | ||
| 18 | metadata_thp_auto = 1, | ||
| 19 | metadata_thp_always = 2, | ||
| 20 | metadata_thp_mode_limit = 3 | ||
| 21 | }; | ||
| 22 | typedef enum metadata_thp_mode_e metadata_thp_mode_t; | ||
| 23 | |||
| 24 | #define METADATA_THP_DEFAULT metadata_thp_disabled | ||
| 25 | extern metadata_thp_mode_t opt_metadata_thp; | ||
| 26 | extern const char *metadata_thp_mode_names[]; | ||
| 27 | |||
| 28 | |||
| 29 | /* Embedded at the beginning of every block of base-managed virtual memory. */ | ||
| 30 | typedef struct base_block_s base_block_t; | ||
| 31 | struct base_block_s { | ||
| 32 | /* Total size of block's virtual memory mapping. */ | ||
| 33 | size_t size; | ||
| 34 | |||
| 35 | /* Next block in list of base's blocks. */ | ||
| 36 | base_block_t *next; | ||
| 37 | |||
| 38 | /* Tracks unused trailing space. */ | ||
| 39 | edata_t edata; | ||
| 40 | }; | ||
| 41 | |||
| 42 | typedef struct base_s base_t; | ||
| 43 | struct base_s { | ||
| 44 | /* | ||
| 45 | * User-configurable extent hook functions. | ||
| 46 | */ | ||
| 47 | ehooks_t ehooks; | ||
| 48 | |||
| 49 | /* | ||
| 50 | * User-configurable extent hook functions for metadata allocations. | ||
| 51 | */ | ||
| 52 | ehooks_t ehooks_base; | ||
| 53 | |||
| 54 | /* Protects base_alloc() and base_stats_get() operations. */ | ||
| 55 | malloc_mutex_t mtx; | ||
| 56 | |||
| 57 | /* Using THP when true (metadata_thp auto mode). */ | ||
| 58 | bool auto_thp_switched; | ||
| 59 | /* | ||
| 60 | * Most recent size class in the series of increasingly large base | ||
| 61 | * extents. Logarithmic spacing between subsequent allocations ensures | ||
| 62 | * that the total number of distinct mappings remains small. | ||
| 63 | */ | ||
| 64 | pszind_t pind_last; | ||
| 65 | |||
| 66 | /* Serial number generation state. */ | ||
| 67 | size_t extent_sn_next; | ||
| 68 | |||
| 69 | /* Chain of all blocks associated with base. */ | ||
| 70 | base_block_t *blocks; | ||
| 71 | |||
| 72 | /* Heap of extents that track unused trailing space within blocks. */ | ||
| 73 | edata_heap_t avail[SC_NSIZES]; | ||
| 74 | |||
| 75 | /* Stats, only maintained if config_stats. */ | ||
| 76 | size_t allocated; | ||
| 77 | size_t resident; | ||
| 78 | size_t mapped; | ||
| 79 | /* Number of THP regions touched. */ | ||
| 80 | size_t n_thp; | ||
| 81 | }; | ||
| 82 | |||
| 83 | static inline unsigned | ||
| 84 | base_ind_get(const base_t *base) { | ||
| 85 | return ehooks_ind_get(&base->ehooks); | ||
| 86 | } | ||
| 87 | |||
| 88 | static inline bool | ||
| 89 | metadata_thp_enabled(void) { | ||
| 90 | return (opt_metadata_thp != metadata_thp_disabled); | ||
| 91 | } | ||
| 92 | |||
| 93 | base_t *b0get(void); | ||
| 94 | base_t *base_new(tsdn_t *tsdn, unsigned ind, | ||
| 95 | const extent_hooks_t *extent_hooks, bool metadata_use_hooks); | ||
| 96 | void base_delete(tsdn_t *tsdn, base_t *base); | ||
| 97 | ehooks_t *base_ehooks_get(base_t *base); | ||
| 98 | ehooks_t *base_ehooks_get_for_metadata(base_t *base); | ||
| 99 | extent_hooks_t *base_extent_hooks_set(base_t *base, | ||
| 100 | extent_hooks_t *extent_hooks); | ||
| 101 | void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment); | ||
| 102 | edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base); | ||
| 103 | void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, | ||
| 104 | size_t *resident, size_t *mapped, size_t *n_thp); | ||
| 105 | void base_prefork(tsdn_t *tsdn, base_t *base); | ||
| 106 | void base_postfork_parent(tsdn_t *tsdn, base_t *base); | ||
| 107 | void base_postfork_child(tsdn_t *tsdn, base_t *base); | ||
| 108 | bool base_boot(tsdn_t *tsdn); | ||
| 109 | |||
| 110 | #endif /* JEMALLOC_INTERNAL_BASE_H */ | ||
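The `pind_last` comment above carries the key sizing idea: each new base block is drawn from a larger size class than the previous one, so metadata capacity grows geometrically per mapping and the number of distinct mappings stays small. A back-of-the-envelope sketch of that effect (doubling is an assumption here; the real series follows jemalloc's page size classes, not this exact rule):

```c
/* Illustrative only: geometric block growth keeps the mapping count small.
 * Doubling is an assumption; jemalloc's actual series is driven by its page
 * size classes (pind_last), not by this exact rule. */
#include <stddef.h>
#include <stdio.h>

int main(void) {
	size_t block_size = 1 << 16;        /* hypothetical first block: 64 KiB */
	size_t total = 0;
	unsigned nmaps = 0;
	while (total < (size_t)1 << 30) {   /* metadata demand up to ~1 GiB */
		total += block_size;
		nmaps++;
		block_size *= 2;            /* next block is twice as large */
	}
	/* A fixed 64 KiB block scheme would need ~16384 mappings for the same
	 * total; the geometric series needs only a handful. */
	printf("mappings needed with doubling blocks: %u\n", nmaps);
	return 0;
}
```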
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bin.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bin.h deleted file mode 100644 index 63f9739..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bin.h +++ /dev/null | |||
| @@ -1,82 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_BIN_H | ||
| 2 | #define JEMALLOC_INTERNAL_BIN_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/bin_stats.h" | ||
| 5 | #include "jemalloc/internal/bin_types.h" | ||
| 6 | #include "jemalloc/internal/edata.h" | ||
| 7 | #include "jemalloc/internal/mutex.h" | ||
| 8 | #include "jemalloc/internal/sc.h" | ||
| 9 | |||
| 10 | /* | ||
| 11 | * A bin contains a set of extents that are currently being used for slab | ||
| 12 | * allocations. | ||
| 13 | */ | ||
| 14 | typedef struct bin_s bin_t; | ||
| 15 | struct bin_s { | ||
| 16 | /* All operations on bin_t fields require lock ownership. */ | ||
| 17 | malloc_mutex_t lock; | ||
| 18 | |||
| 19 | /* | ||
| 20 | * Bin statistics. These get touched every time the lock is acquired, | ||
| 21 | * so put them close by in the hopes of getting some cache locality. | ||
| 22 | */ | ||
| 23 | bin_stats_t stats; | ||
| 24 | |||
| 25 | /* | ||
| 26 | * Current slab being used to service allocations of this bin's size | ||
| 27 | * class. slabcur is independent of slabs_{nonfull,full}; whenever | ||
| 28 | * slabcur is reassigned, the previous slab must be deallocated or | ||
| 29 | * inserted into slabs_{nonfull,full}. | ||
| 30 | */ | ||
| 31 | edata_t *slabcur; | ||
| 32 | |||
| 33 | /* | ||
| 34 | * Heap of non-full slabs. This heap is used to ensure that new | ||
| 35 | * allocations come from the non-full slab that is oldest/lowest in | ||
| 36 | * memory. | ||
| 37 | */ | ||
| 38 | edata_heap_t slabs_nonfull; | ||
| 39 | |||
| 40 | /* List used to track full slabs. */ | ||
| 41 | edata_list_active_t slabs_full; | ||
| 42 | }; | ||
| 43 | |||
| 44 | /* A set of sharded bins of the same size class. */ | ||
| 45 | typedef struct bins_s bins_t; | ||
| 46 | struct bins_s { | ||
| 47 | /* Sharded bins. Dynamically sized. */ | ||
| 48 | bin_t *bin_shards; | ||
| 49 | }; | ||
| 50 | |||
| 51 | void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]); | ||
| 52 | bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size, | ||
| 53 | size_t end_size, size_t nshards); | ||
| 54 | |||
| 55 | /* Initializes a bin to empty. Returns true on error. */ | ||
| 56 | bool bin_init(bin_t *bin); | ||
| 57 | |||
| 58 | /* Forking. */ | ||
| 59 | void bin_prefork(tsdn_t *tsdn, bin_t *bin); | ||
| 60 | void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin); | ||
| 61 | void bin_postfork_child(tsdn_t *tsdn, bin_t *bin); | ||
| 62 | |||
| 63 | /* Stats. */ | ||
| 64 | static inline void | ||
| 65 | bin_stats_merge(tsdn_t *tsdn, bin_stats_data_t *dst_bin_stats, bin_t *bin) { | ||
| 66 | malloc_mutex_lock(tsdn, &bin->lock); | ||
| 67 | malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock); | ||
| 68 | bin_stats_t *stats = &dst_bin_stats->stats_data; | ||
| 69 | stats->nmalloc += bin->stats.nmalloc; | ||
| 70 | stats->ndalloc += bin->stats.ndalloc; | ||
| 71 | stats->nrequests += bin->stats.nrequests; | ||
| 72 | stats->curregs += bin->stats.curregs; | ||
| 73 | stats->nfills += bin->stats.nfills; | ||
| 74 | stats->nflushes += bin->stats.nflushes; | ||
| 75 | stats->nslabs += bin->stats.nslabs; | ||
| 76 | stats->reslabs += bin->stats.reslabs; | ||
| 77 | stats->curslabs += bin->stats.curslabs; | ||
| 78 | stats->nonfull_slabs += bin->stats.nonfull_slabs; | ||
| 79 | malloc_mutex_unlock(tsdn, &bin->lock); | ||
| 80 | } | ||
| 81 | |||
| 82 | #endif /* JEMALLOC_INTERNAL_BIN_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bin_info.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bin_info.h deleted file mode 100644 index 7fe65c8..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bin_info.h +++ /dev/null | |||
| @@ -1,50 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_BIN_INFO_H | ||
| 2 | #define JEMALLOC_INTERNAL_BIN_INFO_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/bitmap.h" | ||
| 5 | |||
| 6 | /* | ||
| 7 | * Read-only information associated with each element of arena_t's bins array | ||
| 8 | * is stored separately, partly to reduce memory usage (only one copy, rather | ||
| 9 | * than one per arena), but mainly to avoid false cacheline sharing. | ||
| 10 | * | ||
| 11 | * Each slab has the following layout: | ||
| 12 | * | ||
| 13 | * /--------------------\ | ||
| 14 | * | region 0 | | ||
| 15 | * |--------------------| | ||
| 16 | * | region 1 | | ||
| 17 | * |--------------------| | ||
| 18 | * | ... | | ||
| 19 | * | ... | | ||
| 20 | * | ... | | ||
| 21 | * |--------------------| | ||
| 22 | * | region nregs-1 | | ||
| 23 | * \--------------------/ | ||
| 24 | */ | ||
| 25 | typedef struct bin_info_s bin_info_t; | ||
| 26 | struct bin_info_s { | ||
| 27 | /* Size of regions in a slab for this bin's size class. */ | ||
| 28 | size_t reg_size; | ||
| 29 | |||
| 30 | /* Total size of a slab for this bin's size class. */ | ||
| 31 | size_t slab_size; | ||
| 32 | |||
| 33 | /* Total number of regions in a slab for this bin's size class. */ | ||
| 34 | uint32_t nregs; | ||
| 35 | |||
| 36 | /* Number of sharded bins in each arena for this size class. */ | ||
| 37 | uint32_t n_shards; | ||
| 38 | |||
| 39 | /* | ||
| 40 | * Metadata used to manipulate bitmaps for slabs associated with this | ||
| 41 | * bin. | ||
| 42 | */ | ||
| 43 | bitmap_info_t bitmap_info; | ||
| 44 | }; | ||
| 45 | |||
| 46 | extern bin_info_t bin_infos[SC_NBINS]; | ||
| 47 | |||
| 48 | void bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]); | ||
| 49 | |||
| 50 | #endif /* JEMALLOC_INTERNAL_BIN_INFO_H */ | ||
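Given the layout diagram above, `nregs` is simply how many `reg_size` regions fit in `slab_size`. A quick worked example with hypothetical numbers (the real values come from the size-class machinery, not this arithmetic):

```c
/* Worked example with made-up numbers; the real values come from the sc
 * (size class) module, not from this calculation. */
#include <stdio.h>

int main(void) {
	unsigned reg_size = 192;            /* hypothetical small size class */
	unsigned slab_size = 3 * 4096;      /* hypothetical 3-page slab */
	unsigned nregs = slab_size / reg_size;
	/* Regions are laid out back to back, as in the layout diagram above. */
	printf("one slab holds %u regions of %u bytes (%u of %u bytes used)\n",
	    nregs, reg_size, nregs * reg_size, slab_size);
	return 0;
}
```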
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bin_stats.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bin_stats.h deleted file mode 100644 index 0b99297..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bin_stats.h +++ /dev/null | |||
| @@ -1,57 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_BIN_STATS_H | ||
| 2 | #define JEMALLOC_INTERNAL_BIN_STATS_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/mutex_prof.h" | ||
| 5 | |||
| 6 | typedef struct bin_stats_s bin_stats_t; | ||
| 7 | struct bin_stats_s { | ||
| 8 | /* | ||
| 9 | * Total number of allocation/deallocation requests served directly by | ||
| 10 | * the bin. Note that tcache may allocate an object, then recycle it | ||
| 11 | * many times, resulting many increments to nrequests, but only one | ||
| 12 | * each to nmalloc and ndalloc. | ||
| 13 | */ | ||
| 14 | uint64_t nmalloc; | ||
| 15 | uint64_t ndalloc; | ||
| 16 | |||
| 17 | /* | ||
| 18 | * Number of allocation requests that correspond to the size of this | ||
| 19 | * bin. This includes requests served by tcache, though tcache only | ||
| 20 | * periodically merges into this counter. | ||
| 21 | */ | ||
| 22 | uint64_t nrequests; | ||
| 23 | |||
| 24 | /* | ||
| 25 | * Current number of regions of this size class, including regions | ||
| 26 | * currently cached by tcache. | ||
| 27 | */ | ||
| 28 | size_t curregs; | ||
| 29 | |||
| 30 | /* Number of tcache fills from this bin. */ | ||
| 31 | uint64_t nfills; | ||
| 32 | |||
| 33 | /* Number of tcache flushes to this bin. */ | ||
| 34 | uint64_t nflushes; | ||
| 35 | |||
| 36 | /* Total number of slabs created for this bin's size class. */ | ||
| 37 | uint64_t nslabs; | ||
| 38 | |||
| 39 | /* | ||
| 40 | * Total number of slabs reused by extracting them from the slabs heap | ||
| 41 | * for this bin's size class. | ||
| 42 | */ | ||
| 43 | uint64_t reslabs; | ||
| 44 | |||
| 45 | /* Current number of slabs in this bin. */ | ||
| 46 | size_t curslabs; | ||
| 47 | |||
| 48 | /* Current size of nonfull slabs heap in this bin. */ | ||
| 49 | size_t nonfull_slabs; | ||
| 50 | }; | ||
| 51 | |||
| 52 | typedef struct bin_stats_data_s bin_stats_data_t; | ||
| 53 | struct bin_stats_data_s { | ||
| 54 | bin_stats_t stats_data; | ||
| 55 | mutex_prof_data_t mutex_data; | ||
| 56 | }; | ||
| 57 | #endif /* JEMALLOC_INTERNAL_BIN_STATS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bin_types.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bin_types.h deleted file mode 100644 index 945e832..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bin_types.h +++ /dev/null | |||
| @@ -1,17 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_BIN_TYPES_H | ||
| 2 | #define JEMALLOC_INTERNAL_BIN_TYPES_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/sc.h" | ||
| 5 | |||
| 6 | #define BIN_SHARDS_MAX (1 << EDATA_BITS_BINSHARD_WIDTH) | ||
| 7 | #define N_BIN_SHARDS_DEFAULT 1 | ||
| 8 | |||
| 9 | /* Used in TSD static initializer only. Real init in arena_bind(). */ | ||
| 10 | #define TSD_BINSHARDS_ZERO_INITIALIZER {{UINT8_MAX}} | ||
| 11 | |||
| 12 | typedef struct tsd_binshards_s tsd_binshards_t; | ||
| 13 | struct tsd_binshards_s { | ||
| 14 | uint8_t binshard[SC_NBINS]; | ||
| 15 | }; | ||
| 16 | |||
| 17 | #endif /* JEMALLOC_INTERNAL_BIN_TYPES_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bit_util.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bit_util.h deleted file mode 100644 index bac5914..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bit_util.h +++ /dev/null | |||
| @@ -1,422 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_BIT_UTIL_H | ||
| 2 | #define JEMALLOC_INTERNAL_BIT_UTIL_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/assert.h" | ||
| 5 | |||
| 6 | /* Sanity check. */ | ||
| 7 | #if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ | ||
| 8 | || !defined(JEMALLOC_INTERNAL_FFS) | ||
| 9 | # error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure | ||
| 10 | #endif | ||
| 11 | |||
| 12 | /* | ||
| 13 | * Unlike the builtins and posix ffs functions, our ffs requires a non-zero | ||
| 14 | * input, and returns the position of the lowest bit set (as opposed to the | ||
| 15 | * posix versions, which return 1 larger than that position and use a return | ||
| 16 | * value of zero as a sentinel). This tends to simplify logic in callers, and | ||
| 17 | * allows for consistency with the builtins we build fls on top of. | ||
| 18 | */ | ||
| 19 | static inline unsigned | ||
| 20 | ffs_llu(unsigned long long x) { | ||
| 21 | util_assume(x != 0); | ||
| 22 | return JEMALLOC_INTERNAL_FFSLL(x) - 1; | ||
| 23 | } | ||
| 24 | |||
| 25 | static inline unsigned | ||
| 26 | ffs_lu(unsigned long x) { | ||
| 27 | util_assume(x != 0); | ||
| 28 | return JEMALLOC_INTERNAL_FFSL(x) - 1; | ||
| 29 | } | ||
| 30 | |||
| 31 | static inline unsigned | ||
| 32 | ffs_u(unsigned x) { | ||
| 33 | util_assume(x != 0); | ||
| 34 | return JEMALLOC_INTERNAL_FFS(x) - 1; | ||
| 35 | } | ||
| 36 | |||
| 37 | #define DO_FLS_SLOW(x, suffix) do { \ | ||
| 38 | util_assume(x != 0); \ | ||
| 39 | x |= (x >> 1); \ | ||
| 40 | x |= (x >> 2); \ | ||
| 41 | x |= (x >> 4); \ | ||
| 42 | x |= (x >> 8); \ | ||
| 43 | x |= (x >> 16); \ | ||
| 44 | if (sizeof(x) > 4) { \ | ||
| 45 | /* \ | ||
| 46 | * If sizeof(x) is 4, then the expression "x >> 32" \ | ||
| 47 | * will generate compiler warnings even if the code \ | ||
| 48 | * never executes. This circumvents the warning, and \ | ||
| 49 | * gets compiled out in optimized builds. \ | ||
| 50 | */ \ | ||
| 51 | int constant_32 = sizeof(x) * 4; \ | ||
| 52 | x |= (x >> constant_32); \ | ||
| 53 | } \ | ||
| 54 | x++; \ | ||
| 55 | if (x == 0) { \ | ||
| 56 | return 8 * sizeof(x) - 1; \ | ||
| 57 | } \ | ||
| 58 | return ffs_##suffix(x) - 1; \ | ||
| 59 | } while(0) | ||
| 60 | |||
| 61 | static inline unsigned | ||
| 62 | fls_llu_slow(unsigned long long x) { | ||
| 63 | DO_FLS_SLOW(x, llu); | ||
| 64 | } | ||
| 65 | |||
| 66 | static inline unsigned | ||
| 67 | fls_lu_slow(unsigned long x) { | ||
| 68 | DO_FLS_SLOW(x, lu); | ||
| 69 | } | ||
| 70 | |||
| 71 | static inline unsigned | ||
| 72 | fls_u_slow(unsigned x) { | ||
| 73 | DO_FLS_SLOW(x, u); | ||
| 74 | } | ||
| 75 | |||
| 76 | #undef DO_FLS_SLOW | ||
| 77 | |||
| 78 | #ifdef JEMALLOC_HAVE_BUILTIN_CLZ | ||
| 79 | static inline unsigned | ||
| 80 | fls_llu(unsigned long long x) { | ||
| 81 | util_assume(x != 0); | ||
| 82 | /* | ||
| 83 | * Note that the xor here is more naturally written as subtraction; the | ||
| 84 | * last bit set is the number of bits in the type minus the number of | ||
| 85 | * leading zero bits. But GCC implements that as: | ||
| 86 | * bsr edi, edi | ||
| 87 | * mov eax, 31 | ||
| 88 | * xor edi, 31 | ||
| 89 | * sub eax, edi | ||
| 90 | * If we write it as xor instead, then we get | ||
| 91 | * bsr eax, edi | ||
| 92 | * as desired. | ||
| 93 | */ | ||
| 94 | return (8 * sizeof(x) - 1) ^ __builtin_clzll(x); | ||
| 95 | } | ||
| 96 | |||
| 97 | static inline unsigned | ||
| 98 | fls_lu(unsigned long x) { | ||
| 99 | util_assume(x != 0); | ||
| 100 | return (8 * sizeof(x) - 1) ^ __builtin_clzl(x); | ||
| 101 | } | ||
| 102 | |||
| 103 | static inline unsigned | ||
| 104 | fls_u(unsigned x) { | ||
| 105 | util_assume(x != 0); | ||
| 106 | return (8 * sizeof(x) - 1) ^ __builtin_clz(x); | ||
| 107 | } | ||
| 108 | #elif defined(_MSC_VER) | ||
| 109 | |||
| 110 | #if LG_SIZEOF_PTR == 3 | ||
| 111 | #define DO_BSR64(bit, x) _BitScanReverse64(&bit, x) | ||
| 112 | #else | ||
| 113 | /* | ||
| 114 | * This never actually runs; we're just dodging a compiler error for the | ||
| 115 | * never-taken branch where sizeof(void *) == 8. | ||
| 116 | */ | ||
| 117 | #define DO_BSR64(bit, x) bit = 0; unreachable() | ||
| 118 | #endif | ||
| 119 | |||
| 120 | #define DO_FLS(x) do { \ | ||
| 121 | if (x == 0) { \ | ||
| 122 | return 8 * sizeof(x); \ | ||
| 123 | } \ | ||
| 124 | unsigned long bit; \ | ||
| 125 | if (sizeof(x) == 4) { \ | ||
| 126 | _BitScanReverse(&bit, (unsigned)x); \ | ||
| 127 | return (unsigned)bit; \ | ||
| 128 | } \ | ||
| 129 | if (sizeof(x) == 8 && sizeof(void *) == 8) { \ | ||
| 130 | DO_BSR64(bit, x); \ | ||
| 131 | return (unsigned)bit; \ | ||
| 132 | } \ | ||
| 133 | if (sizeof(x) == 8 && sizeof(void *) == 4) { \ | ||
| 134 | /* Dodge a compiler warning, as above. */ \ | ||
| 135 | int constant_32 = sizeof(x) * 4; \ | ||
| 136 | if (_BitScanReverse(&bit, \ | ||
| 137 | (unsigned)(x >> constant_32))) { \ | ||
| 138 | return 32 + (unsigned)bit; \ | ||
| 139 | } else { \ | ||
| 140 | _BitScanReverse(&bit, (unsigned)x); \ | ||
| 141 | return (unsigned)bit; \ | ||
| 142 | } \ | ||
| 143 | } \ | ||
| 144 | unreachable(); \ | ||
| 145 | } while (0) | ||
| 146 | |||
| 147 | static inline unsigned | ||
| 148 | fls_llu(unsigned long long x) { | ||
| 149 | DO_FLS(x); | ||
| 150 | } | ||
| 151 | |||
| 152 | static inline unsigned | ||
| 153 | fls_lu(unsigned long x) { | ||
| 154 | DO_FLS(x); | ||
| 155 | } | ||
| 156 | |||
| 157 | static inline unsigned | ||
| 158 | fls_u(unsigned x) { | ||
| 159 | DO_FLS(x); | ||
| 160 | } | ||
| 161 | |||
| 162 | #undef DO_FLS | ||
| 163 | #undef DO_BSR64 | ||
| 164 | #else | ||
| 165 | |||
| 166 | static inline unsigned | ||
| 167 | fls_llu(unsigned long long x) { | ||
| 168 | return fls_llu_slow(x); | ||
| 169 | } | ||
| 170 | |||
| 171 | static inline unsigned | ||
| 172 | fls_lu(unsigned long x) { | ||
| 173 | return fls_lu_slow(x); | ||
| 174 | } | ||
| 175 | |||
| 176 | static inline unsigned | ||
| 177 | fls_u(unsigned x) { | ||
| 178 | return fls_u_slow(x); | ||
| 179 | } | ||
| 180 | #endif | ||
| 181 | |||
| 182 | #if LG_SIZEOF_LONG_LONG > 3 | ||
| 183 | # error "Haven't implemented popcount for 16-byte ints." | ||
| 184 | #endif | ||
| 185 | |||
| 186 | #define DO_POPCOUNT(x, type) do { \ | ||
| 187 | /* \ | ||
| 188 | * Algorithm from an old AMD optimization reference manual. \ | ||
| 189 | * We're putting a little bit more work than you might expect \ | ||
| 190 | * into the no-intrinsic case, since we only support the \ | ||
| 191 | * GCC intrinsics spelling of popcount (for now). Detecting \ | ||
| 192 | * whether or not the popcount builtin is actually usable in \ | ||
| 193 | * MSVC is nontrivial. \ | ||
| 194 | */ \ | ||
| 195 | \ | ||
| 196 | type bmul = (type)0x0101010101010101ULL; \ | ||
| 197 | \ | ||
| 198 | /* \ | ||
| 199 | * Replace each 2 bits with the sideways sum of the original \ | ||
| 200 | * values. 0x5 = 0b0101. \ | ||
| 201 | * \ | ||
| 202 | * You might expect this to be: \ | ||
| 203 | * x = (x & 0x55...) + ((x >> 1) & 0x55...). \ | ||
| 204 | * That costs an extra mask relative to this, though. \ | ||
| 205 | */ \ | ||
| 206 | x = x - ((x >> 1) & (0x55U * bmul)); \ | ||
| 207 | /* Replace each 4 bits with their sideways sum. 0x3 = 0b0011. */\ | ||
| 208 | x = (x & (bmul * 0x33U)) + ((x >> 2) & (bmul * 0x33U)); \ | ||
| 209 | /* \ | ||
| 210 | * Replace each 8 bits with their sideways sum. Note that we \ | ||
| 211 | * can't overflow within each 4-bit sum here, so we can skip \ | ||
| 212 | * the initial mask. \ | ||
| 213 | */ \ | ||
| 214 | x = (x + (x >> 4)) & (bmul * 0x0FU); \ | ||
| 215 | /* \ | ||
| 216 | * None of the partial sums in this multiplication (viewed in \ | ||
| 217 | * base-256) can overflow into the next digit. So the least \ | ||
| 218 | * significant byte of the product will be the least \ | ||
| 219 | * significant byte of the original value, the second least \ | ||
| 220 | * significant byte will be the sum of the two least \ | ||
| 221 | * significant bytes of the original value, and so on. \ | ||
| 222 | * Importantly, the high byte will be the byte-wise sum of all \ | ||
| 223 | * the bytes of the original value. \ | ||
| 224 | */ \ | ||
| 225 | x = x * bmul; \ | ||
| 226 | x >>= ((sizeof(x) - 1) * 8); \ | ||
| 227 | return (unsigned)x; \ | ||
| 228 | } while(0) | ||
| 229 | |||
| 230 | static inline unsigned | ||
| 231 | popcount_u_slow(unsigned bitmap) { | ||
| 232 | DO_POPCOUNT(bitmap, unsigned); | ||
| 233 | } | ||
| 234 | |||
| 235 | static inline unsigned | ||
| 236 | popcount_lu_slow(unsigned long bitmap) { | ||
| 237 | DO_POPCOUNT(bitmap, unsigned long); | ||
| 238 | } | ||
| 239 | |||
| 240 | static inline unsigned | ||
| 241 | popcount_llu_slow(unsigned long long bitmap) { | ||
| 242 | DO_POPCOUNT(bitmap, unsigned long long); | ||
| 243 | } | ||
| 244 | |||
| 245 | #undef DO_POPCOUNT | ||
| 246 | |||
| 247 | static inline unsigned | ||
| 248 | popcount_u(unsigned bitmap) { | ||
| 249 | #ifdef JEMALLOC_INTERNAL_POPCOUNT | ||
| 250 | return JEMALLOC_INTERNAL_POPCOUNT(bitmap); | ||
| 251 | #else | ||
| 252 | return popcount_u_slow(bitmap); | ||
| 253 | #endif | ||
| 254 | } | ||
| 255 | |||
| 256 | static inline unsigned | ||
| 257 | popcount_lu(unsigned long bitmap) { | ||
| 258 | #ifdef JEMALLOC_INTERNAL_POPCOUNTL | ||
| 259 | return JEMALLOC_INTERNAL_POPCOUNTL(bitmap); | ||
| 260 | #else | ||
| 261 | return popcount_lu_slow(bitmap); | ||
| 262 | #endif | ||
| 263 | } | ||
| 264 | |||
| 265 | static inline unsigned | ||
| 266 | popcount_llu(unsigned long long bitmap) { | ||
| 267 | #ifdef JEMALLOC_INTERNAL_POPCOUNTLL | ||
| 268 | return JEMALLOC_INTERNAL_POPCOUNTLL(bitmap); | ||
| 269 | #else | ||
| 270 | return popcount_llu_slow(bitmap); | ||
| 271 | #endif | ||
| 272 | } | ||
| 273 | |||
| 274 | /* | ||
| 275 | * Clears the lowest set bit in *bitmap and returns the | ||
| 276 | * position of that bit. *bitmap must not be 0. | ||
| 277 | */ | ||
| 278 | |||
| 279 | static inline size_t | ||
| 280 | cfs_lu(unsigned long* bitmap) { | ||
| 281 | util_assume(*bitmap != 0); | ||
| 282 | size_t bit = ffs_lu(*bitmap); | ||
| 283 | *bitmap ^= ZU(1) << bit; | ||
| 284 | return bit; | ||
| 285 | } | ||
| 286 | |||
| 287 | static inline unsigned | ||
| 288 | ffs_zu(size_t x) { | ||
| 289 | #if LG_SIZEOF_PTR == LG_SIZEOF_INT | ||
| 290 | return ffs_u(x); | ||
| 291 | #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG | ||
| 292 | return ffs_lu(x); | ||
| 293 | #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG | ||
| 294 | return ffs_llu(x); | ||
| 295 | #else | ||
| 296 | #error No implementation for size_t ffs() | ||
| 297 | #endif | ||
| 298 | } | ||
| 299 | |||
| 300 | static inline unsigned | ||
| 301 | fls_zu(size_t x) { | ||
| 302 | #if LG_SIZEOF_PTR == LG_SIZEOF_INT | ||
| 303 | return fls_u(x); | ||
| 304 | #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG | ||
| 305 | return fls_lu(x); | ||
| 306 | #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG | ||
| 307 | return fls_llu(x); | ||
| 308 | #else | ||
| 309 | #error No implementation for size_t fls() | ||
| 310 | #endif | ||
| 311 | } | ||
| 312 | |||
| 313 | |||
| 314 | static inline unsigned | ||
| 315 | ffs_u64(uint64_t x) { | ||
| 316 | #if LG_SIZEOF_LONG == 3 | ||
| 317 | return ffs_lu(x); | ||
| 318 | #elif LG_SIZEOF_LONG_LONG == 3 | ||
| 319 | return ffs_llu(x); | ||
| 320 | #else | ||
| 321 | #error No implementation for 64-bit ffs() | ||
| 322 | #endif | ||
| 323 | } | ||
| 324 | |||
| 325 | static inline unsigned | ||
| 326 | fls_u64(uint64_t x) { | ||
| 327 | #if LG_SIZEOF_LONG == 3 | ||
| 328 | return fls_lu(x); | ||
| 329 | #elif LG_SIZEOF_LONG_LONG == 3 | ||
| 330 | return fls_llu(x); | ||
| 331 | #else | ||
| 332 | #error No implementation for 64-bit fls() | ||
| 333 | #endif | ||
| 334 | } | ||
| 335 | |||
| 336 | static inline unsigned | ||
| 337 | ffs_u32(uint32_t x) { | ||
| 338 | #if LG_SIZEOF_INT == 2 | ||
| 339 | return ffs_u(x); | ||
| 340 | #else | ||
| 341 | #error No implementation for 32-bit ffs() | ||
| 342 | #endif | ||
| 343 | return ffs_u(x); | ||
| 344 | } | ||
| 345 | |||
| 346 | static inline unsigned | ||
| 347 | fls_u32(uint32_t x) { | ||
| 348 | #if LG_SIZEOF_INT == 2 | ||
| 349 | return fls_u(x); | ||
| 350 | #else | ||
| 351 | #error No implementation for 32-bit fls() | ||
| 352 | #endif | ||
| 353 | return fls_u(x); | ||
| 354 | } | ||
| 355 | |||
| 356 | static inline uint64_t | ||
| 357 | pow2_ceil_u64(uint64_t x) { | ||
| 358 | if (unlikely(x <= 1)) { | ||
| 359 | return x; | ||
| 360 | } | ||
| 361 | size_t msb_on_index = fls_u64(x - 1); | ||
| 362 | /* | ||
| 363 | * Range-check; it's on the callers to ensure that the result of this | ||
| 364 | * call won't overflow. | ||
| 365 | */ | ||
| 366 | assert(msb_on_index < 63); | ||
| 367 | return 1ULL << (msb_on_index + 1); | ||
| 368 | } | ||
| 369 | |||
| 370 | static inline uint32_t | ||
| 371 | pow2_ceil_u32(uint32_t x) { | ||
| 372 | if (unlikely(x <= 1)) { | ||
| 373 | return x; | ||
| 374 | } | ||
| 375 | size_t msb_on_index = fls_u32(x - 1); | ||
| 376 | /* As above. */ | ||
| 377 | assert(msb_on_index < 31); | ||
| 378 | return 1U << (msb_on_index + 1); | ||
| 379 | } | ||
| 380 | |||
| 381 | /* Compute the smallest power of 2 that is >= x. */ | ||
| 382 | static inline size_t | ||
| 383 | pow2_ceil_zu(size_t x) { | ||
| 384 | #if (LG_SIZEOF_PTR == 3) | ||
| 385 | return pow2_ceil_u64(x); | ||
| 386 | #else | ||
| 387 | return pow2_ceil_u32(x); | ||
| 388 | #endif | ||
| 389 | } | ||
| 390 | |||
| 391 | static inline unsigned | ||
| 392 | lg_floor(size_t x) { | ||
| 393 | util_assume(x != 0); | ||
| 394 | #if (LG_SIZEOF_PTR == 3) | ||
| 395 | return fls_u64(x); | ||
| 396 | #else | ||
| 397 | return fls_u32(x); | ||
| 398 | #endif | ||
| 399 | } | ||
| 400 | |||
| 401 | static inline unsigned | ||
| 402 | lg_ceil(size_t x) { | ||
| 403 | return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1); | ||
| 404 | } | ||
| 405 | |||
| 406 | /* A compile-time version of lg_floor and lg_ceil. */ | ||
| 407 | #define LG_FLOOR_1(x) 0 | ||
| 408 | #define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1)) | ||
| 409 | #define LG_FLOOR_4(x) (x < (1ULL << 2) ? LG_FLOOR_2(x) : 2 + LG_FLOOR_2(x >> 2)) | ||
| 410 | #define LG_FLOOR_8(x) (x < (1ULL << 4) ? LG_FLOOR_4(x) : 4 + LG_FLOOR_4(x >> 4)) | ||
| 411 | #define LG_FLOOR_16(x) (x < (1ULL << 8) ? LG_FLOOR_8(x) : 8 + LG_FLOOR_8(x >> 8)) | ||
| 412 | #define LG_FLOOR_32(x) (x < (1ULL << 16) ? LG_FLOOR_16(x) : 16 + LG_FLOOR_16(x >> 16)) | ||
| 413 | #define LG_FLOOR_64(x) (x < (1ULL << 32) ? LG_FLOOR_32(x) : 32 + LG_FLOOR_32(x >> 32)) | ||
| 414 | #if LG_SIZEOF_PTR == 2 | ||
| 415 | # define LG_FLOOR(x) LG_FLOOR_32((x)) | ||
| 416 | #else | ||
| 417 | # define LG_FLOOR(x) LG_FLOOR_64((x)) | ||
| 418 | #endif | ||
| 419 | |||
| 420 | #define LG_CEIL(x) (LG_FLOOR(x) + (((x) & ((x) - 1)) == 0 ? 0 : 1)) | ||
| 421 | |||
| 422 | #endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */ | ||
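For reference, a minimal standalone sketch of how the pow2_ceil / lg_floor / lg_ceil helpers above relate to each other. It uses the GCC/Clang __builtin_clzll intrinsic in place of the header's configure-selected fls implementation, so it is only an illustration of the idea, not the actual code:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* fls: index of the most-significant set bit (0-based); x must be nonzero. */
    static unsigned fls_u64_sketch(uint64_t x) {
        return 63 - (unsigned)__builtin_clzll(x);
    }

    /* Smallest power of two >= x, mirroring pow2_ceil_u64 above. */
    static uint64_t pow2_ceil_u64_sketch(uint64_t x) {
        if (x <= 1) {
            return x;
        }
        unsigned msb_on_index = fls_u64_sketch(x - 1);
        assert(msb_on_index < 63);  /* caller must ensure the result can't overflow */
        return 1ULL << (msb_on_index + 1);
    }

    int main(void) {
        /* lg_floor(x) is fls(x); lg_ceil adds one unless x is a power of two. */
        uint64_t x = 100;
        unsigned lg_floor = fls_u64_sketch(x);                       /* 6 */
        unsigned lg_ceil = lg_floor + ((x & (x - 1)) == 0 ? 0 : 1);  /* 7 */
        printf("lg_floor(100)=%u lg_ceil(100)=%u pow2_ceil(100)=%llu\n",
            lg_floor, lg_ceil, (unsigned long long)pow2_ceil_u64_sketch(x));
        /* Expected output: lg_floor(100)=6 lg_ceil(100)=7 pow2_ceil(100)=128 */
        return 0;
    }

The LG_FLOOR / LG_CEIL macros compute the same quantities, but entirely in the preprocessor, so they can be used where a compile-time constant is required.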
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bitmap.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bitmap.h deleted file mode 100644 index dc19454..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/bitmap.h +++ /dev/null | |||
| @@ -1,368 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_BITMAP_H | ||
| 2 | #define JEMALLOC_INTERNAL_BITMAP_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/bit_util.h" | ||
| 5 | #include "jemalloc/internal/sc.h" | ||
| 6 | |||
| 7 | typedef unsigned long bitmap_t; | ||
| 8 | #define LG_SIZEOF_BITMAP LG_SIZEOF_LONG | ||
| 9 | |||
| 10 | /* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ | ||
| 11 | #if SC_LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES) | ||
| 12 | /* Maximum bitmap bit count is determined by maximum regions per slab. */ | ||
| 13 | # define LG_BITMAP_MAXBITS SC_LG_SLAB_MAXREGS | ||
| 14 | #else | ||
| 15 | /* Maximum bitmap bit count is determined by number of extent size classes. */ | ||
| 16 | # define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES) | ||
| 17 | #endif | ||
| 18 | #define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) | ||
| 19 | |||
| 20 | /* Number of bits per group. */ | ||
| 21 | #define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) | ||
| 22 | #define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS) | ||
| 23 | #define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) | ||
| 24 | |||
| 25 | /* | ||
| 26 | * Decide, based on the maximum bitmap size, whether to use a tree: if a brute- | ||
| 27 | * force linear search would have to call ffs_lu() more than 2^3 times, use a | ||
| 28 | * tree instead. | ||
| 29 | */ | ||
| 30 | #if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 | ||
| 31 | # define BITMAP_USE_TREE | ||
| 32 | #endif | ||
| 33 | |||
| 34 | /* Number of groups required to store a given number of bits. */ | ||
| 35 | #define BITMAP_BITS2GROUPS(nbits) \ | ||
| 36 | (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) | ||
| 37 | |||
| 38 | /* | ||
| 39 | * Number of groups required at a particular level for a given number of bits. | ||
| 40 | */ | ||
| 41 | #define BITMAP_GROUPS_L0(nbits) \ | ||
| 42 | BITMAP_BITS2GROUPS(nbits) | ||
| 43 | #define BITMAP_GROUPS_L1(nbits) \ | ||
| 44 | BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) | ||
| 45 | #define BITMAP_GROUPS_L2(nbits) \ | ||
| 46 | BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) | ||
| 47 | #define BITMAP_GROUPS_L3(nbits) \ | ||
| 48 | BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ | ||
| 49 | BITMAP_BITS2GROUPS((nbits))))) | ||
| 50 | #define BITMAP_GROUPS_L4(nbits) \ | ||
| 51 | BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ | ||
| 52 | BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))))) | ||
| 53 | |||
| 54 | /* | ||
| 55 | * Assuming the number of levels, number of groups required for a given number | ||
| 56 | * of bits. | ||
| 57 | */ | ||
| 58 | #define BITMAP_GROUPS_1_LEVEL(nbits) \ | ||
| 59 | BITMAP_GROUPS_L0(nbits) | ||
| 60 | #define BITMAP_GROUPS_2_LEVEL(nbits) \ | ||
| 61 | (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) | ||
| 62 | #define BITMAP_GROUPS_3_LEVEL(nbits) \ | ||
| 63 | (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) | ||
| 64 | #define BITMAP_GROUPS_4_LEVEL(nbits) \ | ||
| 65 | (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) | ||
| 66 | #define BITMAP_GROUPS_5_LEVEL(nbits) \ | ||
| 67 | (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits)) | ||
| 68 | |||
| 69 | /* | ||
| 70 | * Maximum number of groups required to support LG_BITMAP_MAXBITS. | ||
| 71 | */ | ||
| 72 | #ifdef BITMAP_USE_TREE | ||
| 73 | |||
| 74 | #if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS | ||
| 75 | # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits) | ||
| 76 | # define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) | ||
| 77 | #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 | ||
| 78 | # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits) | ||
| 79 | # define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) | ||
| 80 | #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 | ||
| 81 | # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits) | ||
| 82 | # define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) | ||
| 83 | #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 | ||
| 84 | # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits) | ||
| 85 | # define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) | ||
| 86 | #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5 | ||
| 87 | # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits) | ||
| 88 | # define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS) | ||
| 89 | #else | ||
| 90 | # error "Unsupported bitmap size" | ||
| 91 | #endif | ||
| 92 | |||
| 93 | /* | ||
| 94 | * Maximum number of levels possible. This could be statically computed based | ||
| 95 | * on LG_BITMAP_MAXBITS: | ||
| 96 | * | ||
| 97 | * #define BITMAP_MAX_LEVELS \ | ||
| 98 | * (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ | ||
| 99 | * + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) | ||
| 100 | * | ||
| 101 | * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so | ||
| 102 | * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the | ||
| 103 | * various cascading macros. The only additional cost this incurs is some | ||
| 104 | * unused trailing entries in bitmap_info_t structures; the bitmaps themselves | ||
| 105 | * are not impacted. | ||
| 106 | */ | ||
| 107 | #define BITMAP_MAX_LEVELS 5 | ||
| 108 | |||
| 109 | #define BITMAP_INFO_INITIALIZER(nbits) { \ | ||
| 110 | /* nbits. */ \ | ||
| 111 | nbits, \ | ||
| 112 | /* nlevels. */ \ | ||
| 113 | (BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \ | ||
| 114 | (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \ | ||
| 115 | (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \ | ||
| 116 | (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \ | ||
| 117 | /* levels. */ \ | ||
| 118 | { \ | ||
| 119 | {0}, \ | ||
| 120 | {BITMAP_GROUPS_L0(nbits)}, \ | ||
| 121 | {BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ | ||
| 122 | {BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \ | ||
| 123 | BITMAP_GROUPS_L0(nbits)}, \ | ||
| 124 | {BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \ | ||
| 125 | BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ | ||
| 126 | {BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \ | ||
| 127 | BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \ | ||
| 128 | + BITMAP_GROUPS_L0(nbits)} \ | ||
| 129 | } \ | ||
| 130 | } | ||
| 131 | |||
| 132 | #else /* BITMAP_USE_TREE */ | ||
| 133 | |||
| 134 | #define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits) | ||
| 135 | #define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) | ||
| 136 | |||
| 137 | #define BITMAP_INFO_INITIALIZER(nbits) { \ | ||
| 138 | /* nbits. */ \ | ||
| 139 | nbits, \ | ||
| 140 | /* ngroups. */ \ | ||
| 141 | BITMAP_BITS2GROUPS(nbits) \ | ||
| 142 | } | ||
| 143 | |||
| 144 | #endif /* BITMAP_USE_TREE */ | ||
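To make the cascade concrete, here is a small standalone sketch (assuming 64-bit groups, i.e. LG_BITMAP_GROUP_NBITS == 6) that mirrors at run time what the BITMAP_GROUPS_*_LEVEL macros compute in the preprocessor: each level needs enough groups to hold one summary bit per group of the level below, and the level counts are summed.

    #include <stdio.h>

    #define GROUP_NBITS 64  /* assumed: unsigned long is 8 bytes */

    /* Groups needed to hold nbits bits: ceiling division by the group width. */
    static unsigned long bits2groups(unsigned long nbits) {
        return (nbits + GROUP_NBITS - 1) / GROUP_NBITS;
    }

    int main(void) {
        /* e.g. a slab bitmap with 4096 regions (nbits must be > 0). */
        unsigned long nbits = 4096;
        unsigned long total = 0;
        unsigned level = 0;
        /* Each level summarizes the one below it, until a single group remains. */
        for (unsigned long groups = bits2groups(nbits); ;
            groups = bits2groups(groups)) {
            printf("level %u: %lu group(s)\n", level, groups);
            total += groups;
            level++;
            if (groups == 1) {
                break;
            }
        }
        printf("total groups: %lu\n", total);  /* 64 + 1 = 65 for 4096 bits */
        return 0;
    }

This matches BITMAP_GROUPS_2_LEVEL(4096) with 64-bit groups: 64 level-0 groups plus 1 level-1 group.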
| 145 | |||
| 146 | typedef struct bitmap_level_s { | ||
| 147 | /* Offset of this level's groups within the array of groups. */ | ||
| 148 | size_t group_offset; | ||
| 149 | } bitmap_level_t; | ||
| 150 | |||
| 151 | typedef struct bitmap_info_s { | ||
| 152 | /* Logical number of bits in bitmap (stored at bottom level). */ | ||
| 153 | size_t nbits; | ||
| 154 | |||
| 155 | #ifdef BITMAP_USE_TREE | ||
| 156 | /* Number of levels necessary for nbits. */ | ||
| 157 | unsigned nlevels; | ||
| 158 | |||
| 159 | /* | ||
| 160 | * Only the first (nlevels+1) elements are used, and levels are ordered | ||
| 161 | * bottom to top (e.g. the bottom level is stored in levels[0]). | ||
| 162 | */ | ||
| 163 | bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; | ||
| 164 | #else /* BITMAP_USE_TREE */ | ||
| 165 | /* Number of groups necessary for nbits. */ | ||
| 166 | size_t ngroups; | ||
| 167 | #endif /* BITMAP_USE_TREE */ | ||
| 168 | } bitmap_info_t; | ||
| 169 | |||
| 170 | void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); | ||
| 171 | void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill); | ||
| 172 | size_t bitmap_size(const bitmap_info_t *binfo); | ||
| 173 | |||
| 174 | static inline bool | ||
| 175 | bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { | ||
| 176 | #ifdef BITMAP_USE_TREE | ||
| 177 | size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; | ||
| 178 | bitmap_t rg = bitmap[rgoff]; | ||
| 179 | /* The bitmap is full iff the root group is 0. */ | ||
| 180 | return (rg == 0); | ||
| 181 | #else | ||
| 182 | size_t i; | ||
| 183 | |||
| 184 | for (i = 0; i < binfo->ngroups; i++) { | ||
| 185 | if (bitmap[i] != 0) { | ||
| 186 | return false; | ||
| 187 | } | ||
| 188 | } | ||
| 189 | return true; | ||
| 190 | #endif | ||
| 191 | } | ||
| 192 | |||
| 193 | static inline bool | ||
| 194 | bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { | ||
| 195 | size_t goff; | ||
| 196 | bitmap_t g; | ||
| 197 | |||
| 198 | assert(bit < binfo->nbits); | ||
| 199 | goff = bit >> LG_BITMAP_GROUP_NBITS; | ||
| 200 | g = bitmap[goff]; | ||
| 201 | return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); | ||
| 202 | } | ||
| 203 | |||
| 204 | static inline void | ||
| 205 | bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { | ||
| 206 | size_t goff; | ||
| 207 | bitmap_t *gp; | ||
| 208 | bitmap_t g; | ||
| 209 | |||
| 210 | assert(bit < binfo->nbits); | ||
| 211 | assert(!bitmap_get(bitmap, binfo, bit)); | ||
| 212 | goff = bit >> LG_BITMAP_GROUP_NBITS; | ||
| 213 | gp = &bitmap[goff]; | ||
| 214 | g = *gp; | ||
| 215 | assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); | ||
| 216 | g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); | ||
| 217 | *gp = g; | ||
| 218 | assert(bitmap_get(bitmap, binfo, bit)); | ||
| 219 | #ifdef BITMAP_USE_TREE | ||
| 220 | /* Propagate group state transitions up the tree. */ | ||
| 221 | if (g == 0) { | ||
| 222 | unsigned i; | ||
| 223 | for (i = 1; i < binfo->nlevels; i++) { | ||
| 224 | bit = goff; | ||
| 225 | goff = bit >> LG_BITMAP_GROUP_NBITS; | ||
| 226 | gp = &bitmap[binfo->levels[i].group_offset + goff]; | ||
| 227 | g = *gp; | ||
| 228 | assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); | ||
| 229 | g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); | ||
| 230 | *gp = g; | ||
| 231 | if (g != 0) { | ||
| 232 | break; | ||
| 233 | } | ||
| 234 | } | ||
| 235 | } | ||
| 236 | #endif | ||
| 237 | } | ||
| 238 | |||
| 239 | /* ffu: find first unset bit >= min_bit. */ | ||
| 240 | static inline size_t | ||
| 241 | bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) { | ||
| 242 | assert(min_bit < binfo->nbits); | ||
| 243 | |||
| 244 | #ifdef BITMAP_USE_TREE | ||
| 245 | size_t bit = 0; | ||
| 246 | for (unsigned level = binfo->nlevels; level--;) { | ||
| 247 | size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level + | ||
| 248 | 1)); | ||
| 249 | bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit | ||
| 250 | >> lg_bits_per_group)]; | ||
| 251 | unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit - | ||
| 252 | bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS)); | ||
| 253 | assert(group_nmask <= BITMAP_GROUP_NBITS); | ||
| 254 | bitmap_t group_mask = ~((1LU << group_nmask) - 1); | ||
| 255 | bitmap_t group_masked = group & group_mask; | ||
| 256 | if (group_masked == 0LU) { | ||
| 257 | if (group == 0LU) { | ||
| 258 | return binfo->nbits; | ||
| 259 | } | ||
| 260 | /* | ||
| 261 | * min_bit was preceded by one or more unset bits in | ||
| 262 | * this group, but there are no other unset bits in this | ||
| 263 | * group. Try again starting at the first bit of the | ||
| 264 | * next sibling. This will recurse at most once per | ||
| 265 | * non-root level. | ||
| 266 | */ | ||
| 267 | size_t sib_base = bit + (ZU(1) << lg_bits_per_group); | ||
| 268 | assert(sib_base > min_bit); | ||
| 269 | assert(sib_base > bit); | ||
| 270 | if (sib_base >= binfo->nbits) { | ||
| 271 | return binfo->nbits; | ||
| 272 | } | ||
| 273 | return bitmap_ffu(bitmap, binfo, sib_base); | ||
| 274 | } | ||
| 275 | bit += ((size_t)ffs_lu(group_masked)) << | ||
| 276 | (lg_bits_per_group - LG_BITMAP_GROUP_NBITS); | ||
| 277 | } | ||
| 278 | assert(bit >= min_bit); | ||
| 279 | assert(bit < binfo->nbits); | ||
| 280 | return bit; | ||
| 281 | #else | ||
| 282 | size_t i = min_bit >> LG_BITMAP_GROUP_NBITS; | ||
| 283 | bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK)) | ||
| 284 | - 1); | ||
| 285 | size_t bit; | ||
| 286 | do { | ||
| 287 | if (g != 0) { | ||
| 288 | bit = ffs_lu(g); | ||
| 289 | return (i << LG_BITMAP_GROUP_NBITS) + bit; | ||
| 290 | } | ||
| 291 | i++; | ||
| 292 | g = bitmap[i]; | ||
| 293 | } while (i < binfo->ngroups); | ||
| 294 | return binfo->nbits; | ||
| 295 | #endif | ||
| 296 | } | ||
| 297 | |||
| 298 | /* sfu: set first unset. */ | ||
| 299 | static inline size_t | ||
| 300 | bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { | ||
| 301 | size_t bit; | ||
| 302 | bitmap_t g; | ||
| 303 | unsigned i; | ||
| 304 | |||
| 305 | assert(!bitmap_full(bitmap, binfo)); | ||
| 306 | |||
| 307 | #ifdef BITMAP_USE_TREE | ||
| 308 | i = binfo->nlevels - 1; | ||
| 309 | g = bitmap[binfo->levels[i].group_offset]; | ||
| 310 | bit = ffs_lu(g); | ||
| 311 | while (i > 0) { | ||
| 312 | i--; | ||
| 313 | g = bitmap[binfo->levels[i].group_offset + bit]; | ||
| 314 | bit = (bit << LG_BITMAP_GROUP_NBITS) + ffs_lu(g); | ||
| 315 | } | ||
| 316 | #else | ||
| 317 | i = 0; | ||
| 318 | g = bitmap[0]; | ||
| 319 | while (g == 0) { | ||
| 320 | i++; | ||
| 321 | g = bitmap[i]; | ||
| 322 | } | ||
| 323 | bit = (i << LG_BITMAP_GROUP_NBITS) + ffs_lu(g); | ||
| 324 | #endif | ||
| 325 | bitmap_set(bitmap, binfo, bit); | ||
| 326 | return bit; | ||
| 327 | } | ||
| 328 | |||
| 329 | static inline void | ||
| 330 | bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { | ||
| 331 | size_t goff; | ||
| 332 | bitmap_t *gp; | ||
| 333 | bitmap_t g; | ||
| 334 | UNUSED bool propagate; | ||
| 335 | |||
| 336 | assert(bit < binfo->nbits); | ||
| 337 | assert(bitmap_get(bitmap, binfo, bit)); | ||
| 338 | goff = bit >> LG_BITMAP_GROUP_NBITS; | ||
| 339 | gp = &bitmap[goff]; | ||
| 340 | g = *gp; | ||
| 341 | propagate = (g == 0); | ||
| 342 | assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); | ||
| 343 | g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); | ||
| 344 | *gp = g; | ||
| 345 | assert(!bitmap_get(bitmap, binfo, bit)); | ||
| 346 | #ifdef BITMAP_USE_TREE | ||
| 347 | /* Propagate group state transitions up the tree. */ | ||
| 348 | if (propagate) { | ||
| 349 | unsigned i; | ||
| 350 | for (i = 1; i < binfo->nlevels; i++) { | ||
| 351 | bit = goff; | ||
| 352 | goff = bit >> LG_BITMAP_GROUP_NBITS; | ||
| 353 | gp = &bitmap[binfo->levels[i].group_offset + goff]; | ||
| 354 | g = *gp; | ||
| 355 | propagate = (g == 0); | ||
| 356 | assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) | ||
| 357 | == 0); | ||
| 358 | g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); | ||
| 359 | *gp = g; | ||
| 360 | if (!propagate) { | ||
| 361 | break; | ||
| 362 | } | ||
| 363 | } | ||
| 364 | } | ||
| 365 | #endif /* BITMAP_USE_TREE */ | ||
| 366 | } | ||
| 367 | |||
| 368 | #endif /* JEMALLOC_INTERNAL_BITMAP_H */ | ||
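A simplified, single-group sketch of the inverted convention the functions above rely on: a set bit in the group word means the logical bit is unset (free), which is why bitmap_full checks for 0 and bitmap_get negates the test. The real code additionally handles multiple groups and, in tree mode, propagates group transitions upward; __builtin_ctzl stands in for ffs_lu here.

    #include <assert.h>
    #include <stdio.h>

    /* One 64-bit group; a set word bit means "this slot is still free". */
    static unsigned long g;

    static void bitmap_init_sketch(unsigned nbits) {
        g = (nbits == 64) ? ~0UL : ((1UL << nbits) - 1);
    }

    /* sfu: claim (logically set) the first unset slot and return its index. */
    static unsigned sfu_sketch(void) {
        assert(g != 0);                       /* bitmap must not be full */
        unsigned bit = (unsigned)__builtin_ctzl(g);
        g ^= 1UL << bit;                      /* clear word bit == set logical bit */
        return bit;
    }

    /* unset: release a previously claimed slot. */
    static void unset_sketch(unsigned bit) {
        assert((g & (1UL << bit)) == 0);      /* slot must currently be claimed */
        g ^= 1UL << bit;
    }

    int main(void) {
        bitmap_init_sketch(8);
        unsigned a = sfu_sketch();            /* 0 */
        unsigned b = sfu_sketch();            /* 1 */
        unset_sketch(a);
        unsigned c = sfu_sketch();            /* 0 again: lowest index reused first */
        printf("%u %u %u\n", a, b, c);
        return 0;
    }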
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/buf_writer.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/buf_writer.h deleted file mode 100644 index 37aa6de..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/buf_writer.h +++ /dev/null | |||
| @@ -1,32 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_BUF_WRITER_H | ||
| 2 | #define JEMALLOC_INTERNAL_BUF_WRITER_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Note: when using the buffered writer, cbopaque is passed to write_cb only | ||
| 6 | * when the buffer is flushed. This would matter if cbopaque pointed to | ||
| 7 | * something that changes between write_cb calls, or to something that | ||
| 8 | * affects write_cb in a way that depends on the content of the output string. | ||
| 9 | * In practice, however, cbopaque typically points to some "option-like" | ||
| 10 | * context for write_cb, so the distinction doesn't matter. | ||
| 11 | */ | ||
| 12 | |||
| 13 | typedef struct { | ||
| 14 | write_cb_t *write_cb; | ||
| 15 | void *cbopaque; | ||
| 16 | char *buf; | ||
| 17 | size_t buf_size; | ||
| 18 | size_t buf_end; | ||
| 19 | bool internal_buf; | ||
| 20 | } buf_writer_t; | ||
| 21 | |||
| 22 | bool buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, | ||
| 23 | write_cb_t *write_cb, void *cbopaque, char *buf, size_t buf_len); | ||
| 24 | void buf_writer_flush(buf_writer_t *buf_writer); | ||
| 25 | write_cb_t buf_writer_cb; | ||
| 26 | void buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer); | ||
| 27 | |||
| 28 | typedef ssize_t (read_cb_t)(void *read_cbopaque, void *buf, size_t limit); | ||
| 29 | void buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb, | ||
| 30 | void *read_cbopaque); | ||
| 31 | |||
| 32 | #endif /* JEMALLOC_INTERNAL_BUF_WRITER_H */ | ||
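The note above (cbopaque only reaching write_cb at flush time) can be illustrated with a toy buffered writer. This is a hand-rolled sketch with made-up names (mini_buf_writer_t and friends), not the jemalloc API, and it assumes each individual write fits in the buffer:

    #include <stdio.h>
    #include <string.h>

    typedef void (write_cb_t)(void *cbopaque, const char *s);

    typedef struct {
        write_cb_t *write_cb;
        void *cbopaque;
        char buf[64];
        size_t buf_end;
    } mini_buf_writer_t;

    static void mini_flush(mini_buf_writer_t *w) {
        if (w->buf_end > 0) {
            w->buf[w->buf_end] = '\0';
            /* cbopaque reaches write_cb only here, once per flush. */
            w->write_cb(w->cbopaque, w->buf);
            w->buf_end = 0;
        }
    }

    static void mini_write(mini_buf_writer_t *w, const char *s) {
        size_t len = strlen(s);             /* assumed: len < sizeof(w->buf) */
        if (w->buf_end + len >= sizeof(w->buf)) {
            mini_flush(w);
        }
        memcpy(w->buf + w->buf_end, s, len);
        w->buf_end += len;
    }

    static void stderr_cb(void *cbopaque, const char *s) {
        fprintf(stderr, "%s: %s", (const char *)cbopaque, s);
    }

    int main(void) {
        mini_buf_writer_t w = {stderr_cb, (void *)"stats", {0}, 0};
        mini_write(&w, "line one\n");
        mini_write(&w, "line two\n");
        mini_flush(&w);   /* both lines arrive in a single callback */
        return 0;
    }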
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/cache_bin.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/cache_bin.h deleted file mode 100644 index caf5be3..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/cache_bin.h +++ /dev/null | |||
| @@ -1,670 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_CACHE_BIN_H | ||
| 2 | #define JEMALLOC_INTERNAL_CACHE_BIN_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/ql.h" | ||
| 5 | #include "jemalloc/internal/sz.h" | ||
| 6 | |||
| 7 | /* | ||
| 8 | * The cache_bins are the mechanism that the tcache and the arena use to | ||
| 9 | * communicate. The tcache fills from and flushes to the arena by passing a | ||
| 10 | * cache_bin_t to fill/flush. When the arena needs to pull stats from the | ||
| 11 | * tcaches associated with it, it does so by iterating over its | ||
| 12 | * cache_bin_array_descriptor_t objects and reading out per-bin stats it | ||
| 13 | * contains. This makes it so that the arena need not know about the existence | ||
| 14 | * of the tcache at all. | ||
| 15 | */ | ||
| 16 | |||
| 17 | /* | ||
| 18 | * The size in bytes of each cache bin stack. We also use this to indicate | ||
| 19 | * *counts* of individual objects. | ||
| 20 | */ | ||
| 21 | typedef uint16_t cache_bin_sz_t; | ||
| 22 | |||
| 23 | /* | ||
| 24 | * Leave a noticeable mark pattern on the cache bin stack boundaries, in case a | ||
| 25 | * bug starts leaking those. Make it look like the junk pattern but be distinct | ||
| 26 | * from it. | ||
| 27 | */ | ||
| 28 | static const uintptr_t cache_bin_preceding_junk = | ||
| 29 | (uintptr_t)0x7a7a7a7a7a7a7a7aULL; | ||
| 30 | /* Note: 7a (preceding) vs. a7 (trailing) -- the pattern tells you which boundary leaked. */ | ||
| 31 | static const uintptr_t cache_bin_trailing_junk = | ||
| 32 | (uintptr_t)0xa7a7a7a7a7a7a7a7ULL; | ||
| 33 | |||
| 34 | /* | ||
| 35 | * The 16-bit cache_bin_sz_t implies the following limit on the number of items | ||
| 36 | * in any individual bin. The cache bins track their bounds using only the low | ||
| 37 | * bits of a pointer, compared against a cache_bin_sz_t, so a stack can span at | ||
| 38 | * most 1 << (sizeof(cache_bin_sz_t) * 8) | ||
| 39 | * bytes, spread across pointer-sized objects, to get the maximum. | ||
| 40 | */ | ||
| 41 | #define CACHE_BIN_NCACHED_MAX (((size_t)1 << sizeof(cache_bin_sz_t) * 8) \ | ||
| 42 | / sizeof(void *) - 1) | ||
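Concretely: with the 2-byte cache_bin_sz_t above and 8-byte pointers, this works out to (1 << 16) / 8 - 1 = 8191 items per bin; on a build with 4-byte pointers it would be 16383.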
| 43 | |||
| 44 | /* | ||
| 45 | * This lives inside the cache_bin (for locality reasons), and is initialized | ||
| 46 | * alongside it, but is otherwise not modified by any cache bin operations. | ||
| 47 | * It's logically public and maintained by its callers. | ||
| 48 | */ | ||
| 49 | typedef struct cache_bin_stats_s cache_bin_stats_t; | ||
| 50 | struct cache_bin_stats_s { | ||
| 51 | /* | ||
| 52 | * Number of allocation requests that corresponded to the size of this | ||
| 53 | * bin. | ||
| 54 | */ | ||
| 55 | uint64_t nrequests; | ||
| 56 | }; | ||
| 57 | |||
| 58 | /* | ||
| 59 | * Read-only information associated with each element of tcache_t's tbins array | ||
| 60 | * is stored separately, mainly to reduce memory usage. | ||
| 61 | */ | ||
| 62 | typedef struct cache_bin_info_s cache_bin_info_t; | ||
| 63 | struct cache_bin_info_s { | ||
| 64 | cache_bin_sz_t ncached_max; | ||
| 65 | }; | ||
| 66 | |||
| 67 | /* | ||
| 68 | * Responsible for caching allocations associated with a single size. | ||
| 69 | * | ||
| 70 | * Several pointers are used to track the stack. To save on metadata bytes, | ||
| 71 | * only the stack_head is a full sized pointer (which is dereferenced on the | ||
| 72 | * fastpath), while the others store only the low 16 bits -- this is correct | ||
| 73 | * because a single stack never takes more space than 2^16 bytes, and at the | ||
| 74 | * same time only equality checks are performed on the low bits. | ||
| 75 | * | ||
| 76 | * (low addr) (high addr) | ||
| 77 | * |------stashed------|------available------|------cached-----| | ||
| 78 | * ^ ^ ^ ^ | ||
| 79 | * low_bound(derived) low_bits_full stack_head low_bits_empty | ||
| 80 | */ | ||
| 81 | typedef struct cache_bin_s cache_bin_t; | ||
| 82 | struct cache_bin_s { | ||
| 83 | /* | ||
| 84 | * The stack grows down. Whenever the bin is nonempty, the head points | ||
| 85 | * to an array entry containing a valid allocation. When it is empty, | ||
| 86 | * the head points to one element past the owned array. | ||
| 87 | */ | ||
| 88 | void **stack_head; | ||
| 89 | /* | ||
| 90 | * stack_head and tstats are both modified frequently. Keep them | ||
| 91 | * close so that they have a higher chance of being on the same | ||
| 92 | * cacheline, and thus incur fewer write-backs. | ||
| 93 | */ | ||
| 94 | cache_bin_stats_t tstats; | ||
| 95 | |||
| 96 | /* | ||
| 97 | * The low bits of the address of the first item in the stack that | ||
| 98 | * hasn't been used since the last GC, to track the low water mark (min | ||
| 99 | * # of cached items). | ||
| 100 | * | ||
| 101 | * Since the stack grows down, this is a higher address than | ||
| 102 | * low_bits_full. | ||
| 103 | */ | ||
| 104 | uint16_t low_bits_low_water; | ||
| 105 | |||
| 106 | /* | ||
| 107 | * The low bits of the value that stack_head will take on when the array | ||
| 108 | * is full (of cached & stashed items). But remember that stack_head | ||
| 109 | * always points to a valid item when the array is nonempty -- so when the | ||
| 110 | * array is full, this position is itself inside the array. | ||
| 111 | * | ||
| 112 | * Recall that since the stack grows down, this is the lowest available | ||
| 113 | * address in the array for caching. Only adjusted when stashing items. | ||
| 114 | */ | ||
| 115 | uint16_t low_bits_full; | ||
| 116 | |||
| 117 | /* | ||
| 118 | * The low bits of the value that stack_head will take on when the array | ||
| 119 | * is empty. | ||
| 120 | * | ||
| 121 | * The stack grows down -- this is one past the highest address in the | ||
| 122 | * array. Immutable after initialization. | ||
| 123 | */ | ||
| 124 | uint16_t low_bits_empty; | ||
| 125 | }; | ||
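A tiny standalone sketch of why tracking only the low 16 bits is sound given the layout above: because a single stack spans at most 2^16 bytes, the unsigned 16-bit difference between two positions is exact even when the addresses straddle a 0xFFFF boundary. This is the property cache_bin_diff below relies on; the constants here are made up for illustration.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Distance in bytes between two positions of the same (<= 64 KiB) stack,
     * computed from the low 16 bits only, as cache_bin_diff() does. */
    static uint16_t diff16(uint16_t earlier, uint16_t later) {
        return (uint16_t)(later - earlier);  /* wraps around modulo 2^16 */
    }

    int main(void) {
        /* A stack that happens to straddle a 16-bit address boundary. */
        uintptr_t full  = 0x1fff0u;          /* low bits 0xfff0 */
        uintptr_t empty = full + 64;         /* low bits 0x0030 */
        uint16_t d = diff16((uint16_t)full, (uint16_t)empty);
        assert(d == 64);                     /* correct despite the wraparound */
        printf("diff = %u bytes (%u slots)\n", (unsigned)d,
            (unsigned)(d / sizeof(void *)));
        return 0;
    }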
| 126 | |||
| 127 | /* | ||
| 128 | * The cache_bins live inside the tcache, but the arena (by design) isn't | ||
| 129 | * supposed to know much about tcache internals. To let the arena iterate over | ||
| 130 | * associated bins, we keep (with the tcache) a linked list of | ||
| 131 | * cache_bin_array_descriptor_ts that tell the arena how to find the bins. | ||
| 132 | */ | ||
| 133 | typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t; | ||
| 134 | struct cache_bin_array_descriptor_s { | ||
| 135 | /* | ||
| 136 | * The arena keeps a list of the cache bins associated with it, for | ||
| 137 | * stats collection. | ||
| 138 | */ | ||
| 139 | ql_elm(cache_bin_array_descriptor_t) link; | ||
| 140 | /* Pointers to the tcache bins. */ | ||
| 141 | cache_bin_t *bins; | ||
| 142 | }; | ||
| 143 | |||
| 144 | static inline void | ||
| 145 | cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor, | ||
| 146 | cache_bin_t *bins) { | ||
| 147 | ql_elm_new(descriptor, link); | ||
| 148 | descriptor->bins = bins; | ||
| 149 | } | ||
| 150 | |||
| 151 | JEMALLOC_ALWAYS_INLINE bool | ||
| 152 | cache_bin_nonfast_aligned(const void *ptr) { | ||
| 153 | if (!config_uaf_detection) { | ||
| 154 | return false; | ||
| 155 | } | ||
| 156 | /* | ||
| 157 | * Currently we use alignment to decide which pointer to junk & stash on | ||
| 158 | * dealloc (for catching use-after-free). In some common cases a | ||
| 159 | * page-aligned check is needed already (sdalloc w/ config_prof), so we | ||
| 160 | * are getting it more or less for free -- no added instructions on | ||
| 161 | * free_fastpath. | ||
| 162 | * | ||
| 163 | * Another way of deciding which pointer to sample is to add another | ||
| 164 | * thread_event that picks one every N bytes. That also adds no cost on | ||
| 165 | * the fastpath; however, it would tend to pick large allocations, which is | ||
| 166 | * not the desired behavior. | ||
| 167 | */ | ||
| 168 | return ((uintptr_t)ptr & san_cache_bin_nonfast_mask) == 0; | ||
| 169 | } | ||
| 170 | |||
| 171 | /* Returns ncached_max: Upper limit on ncached. */ | ||
| 172 | static inline cache_bin_sz_t | ||
| 173 | cache_bin_info_ncached_max(cache_bin_info_t *info) { | ||
| 174 | return info->ncached_max; | ||
| 175 | } | ||
| 176 | |||
| 177 | /* | ||
| 178 | * Internal. | ||
| 179 | * | ||
| 180 | * Asserts that the pointer associated with earlier is <= the one associated | ||
| 181 | * with later. | ||
| 182 | */ | ||
| 183 | static inline void | ||
| 184 | cache_bin_assert_earlier(cache_bin_t *bin, uint16_t earlier, uint16_t later) { | ||
| 185 | if (earlier > later) { | ||
| 186 | assert(bin->low_bits_full > bin->low_bits_empty); | ||
| 187 | } | ||
| 188 | } | ||
| 189 | |||
| 190 | /* | ||
| 191 | * Internal. | ||
| 192 | * | ||
| 193 | * Does difference calculations that handle wraparound correctly. Earlier must | ||
| 194 | * be associated with the position earlier in memory. | ||
| 195 | */ | ||
| 196 | static inline uint16_t | ||
| 197 | cache_bin_diff(cache_bin_t *bin, uint16_t earlier, uint16_t later, bool racy) { | ||
| 198 | /* | ||
| 199 | * When it's racy, bin->low_bits_full can be modified concurrently. It | ||
| 200 | * can cross the uint16_t max value and become less than | ||
| 201 | * bin->low_bits_empty at the time of the check. | ||
| 202 | */ | ||
| 203 | if (!racy) { | ||
| 204 | cache_bin_assert_earlier(bin, earlier, later); | ||
| 205 | } | ||
| 206 | return later - earlier; | ||
| 207 | } | ||
| 208 | |||
| 209 | /* | ||
| 210 | * Number of items currently cached in the bin, without checking ncached_max. | ||
| 211 | * The caller must specify whether the request is racy (i.e. whether | ||
| 212 | * concurrent modifications are possible). | ||
| 213 | */ | ||
| 214 | static inline cache_bin_sz_t | ||
| 215 | cache_bin_ncached_get_internal(cache_bin_t *bin, bool racy) { | ||
| 216 | cache_bin_sz_t diff = cache_bin_diff(bin, | ||
| 217 | (uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty, racy); | ||
| 218 | cache_bin_sz_t n = diff / sizeof(void *); | ||
| 219 | /* | ||
| 220 | * We have undefined behavior here; if this function is called from the | ||
| 221 | * arena stats updating code, then stack_head could change from the | ||
| 222 | * first line to the next one. Morally, these loads should be atomic, | ||
| 223 | * but compilers won't currently generate comparisons with in-memory | ||
| 224 | * operands against atomics, and these variables get accessed on the | ||
| 225 | * fast paths. This should still be "safe" in the sense of generating | ||
| 226 | * the correct assembly for the foreseeable future, though. | ||
| 227 | */ | ||
| 228 | assert(n == 0 || *(bin->stack_head) != NULL || racy); | ||
| 229 | return n; | ||
| 230 | } | ||
| 231 | |||
| 232 | /* | ||
| 233 | * Number of items currently cached in the bin, with checking ncached_max. The | ||
| 234 | * caller must know that no concurrent modification of the cache_bin is | ||
| 235 | * possible. | ||
| 236 | */ | ||
| 237 | static inline cache_bin_sz_t | ||
| 238 | cache_bin_ncached_get_local(cache_bin_t *bin, cache_bin_info_t *info) { | ||
| 239 | cache_bin_sz_t n = cache_bin_ncached_get_internal(bin, | ||
| 240 | /* racy */ false); | ||
| 241 | assert(n <= cache_bin_info_ncached_max(info)); | ||
| 242 | return n; | ||
| 243 | } | ||
| 244 | |||
| 245 | /* | ||
| 246 | * Internal. | ||
| 247 | * | ||
| 248 | * A pointer to the position one past the end of the backing array. | ||
| 249 | * | ||
| 250 | * Do not call if racy, because both 'bin->stack_head' and 'bin->low_bits_full' | ||
| 251 | * are subject to concurrent modifications. | ||
| 252 | */ | ||
| 253 | static inline void ** | ||
| 254 | cache_bin_empty_position_get(cache_bin_t *bin) { | ||
| 255 | cache_bin_sz_t diff = cache_bin_diff(bin, | ||
| 256 | (uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty, | ||
| 257 | /* racy */ false); | ||
| 258 | uintptr_t empty_bits = (uintptr_t)bin->stack_head + diff; | ||
| 259 | void **ret = (void **)empty_bits; | ||
| 260 | |||
| 261 | assert(ret >= bin->stack_head); | ||
| 262 | |||
| 263 | return ret; | ||
| 264 | } | ||
| 265 | |||
| 266 | /* | ||
| 267 | * Internal. | ||
| 268 | * | ||
| 269 | * Calculates low bits of the lower bound of the usable cache bin's range (see | ||
| 270 | * cache_bin_t visual representation above). | ||
| 271 | * | ||
| 272 | * No values are concurrently modified, so should be safe to read in a | ||
| 273 | * multithreaded environment. Currently concurrent access happens only during | ||
| 274 | * arena statistics collection. | ||
| 275 | */ | ||
| 276 | static inline uint16_t | ||
| 277 | cache_bin_low_bits_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) { | ||
| 278 | return (uint16_t)bin->low_bits_empty - | ||
| 279 | info->ncached_max * sizeof(void *); | ||
| 280 | } | ||
| 281 | |||
| 282 | /* | ||
| 283 | * Internal. | ||
| 284 | * | ||
| 285 | * A pointer to the position with the lowest address of the backing array. | ||
| 286 | */ | ||
| 287 | static inline void ** | ||
| 288 | cache_bin_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) { | ||
| 289 | cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info); | ||
| 290 | void **ret = cache_bin_empty_position_get(bin) - ncached_max; | ||
| 291 | assert(ret <= bin->stack_head); | ||
| 292 | |||
| 293 | return ret; | ||
| 294 | } | ||
| 295 | |||
| 296 | /* | ||
| 297 | * As the name implies. This is important since it's not correct to try to | ||
| 298 | * batch fill a nonempty cache bin. | ||
| 299 | */ | ||
| 300 | static inline void | ||
| 301 | cache_bin_assert_empty(cache_bin_t *bin, cache_bin_info_t *info) { | ||
| 302 | assert(cache_bin_ncached_get_local(bin, info) == 0); | ||
| 303 | assert(cache_bin_empty_position_get(bin) == bin->stack_head); | ||
| 304 | } | ||
| 305 | |||
| 306 | /* | ||
| 307 | * Get low water, but without any of the correctness checking we do for the | ||
| 308 | * caller-usable version, if we are temporarily breaking invariants (like | ||
| 309 | * ncached >= low_water during flush). | ||
| 310 | */ | ||
| 311 | static inline cache_bin_sz_t | ||
| 312 | cache_bin_low_water_get_internal(cache_bin_t *bin) { | ||
| 313 | return cache_bin_diff(bin, bin->low_bits_low_water, | ||
| 314 | bin->low_bits_empty, /* racy */ false) / sizeof(void *); | ||
| 315 | } | ||
| 316 | |||
| 317 | /* Returns the numeric value of low water in [0, ncached]. */ | ||
| 318 | static inline cache_bin_sz_t | ||
| 319 | cache_bin_low_water_get(cache_bin_t *bin, cache_bin_info_t *info) { | ||
| 320 | cache_bin_sz_t low_water = cache_bin_low_water_get_internal(bin); | ||
| 321 | assert(low_water <= cache_bin_info_ncached_max(info)); | ||
| 322 | assert(low_water <= cache_bin_ncached_get_local(bin, info)); | ||
| 323 | |||
| 324 | cache_bin_assert_earlier(bin, (uint16_t)(uintptr_t)bin->stack_head, | ||
| 325 | bin->low_bits_low_water); | ||
| 326 | |||
| 327 | return low_water; | ||
| 328 | } | ||
| 329 | |||
| 330 | /* | ||
| 331 | * Indicates that the current cache bin position should be the low water mark | ||
| 332 | * going forward. | ||
| 333 | */ | ||
| 334 | static inline void | ||
| 335 | cache_bin_low_water_set(cache_bin_t *bin) { | ||
| 336 | bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head; | ||
| 337 | } | ||
| 338 | |||
| 339 | static inline void | ||
| 340 | cache_bin_low_water_adjust(cache_bin_t *bin) { | ||
| 341 | if (cache_bin_ncached_get_internal(bin, /* racy */ false) | ||
| 342 | < cache_bin_low_water_get_internal(bin)) { | ||
| 343 | cache_bin_low_water_set(bin); | ||
| 344 | } | ||
| 345 | } | ||
| 346 | |||
| 347 | JEMALLOC_ALWAYS_INLINE void * | ||
| 348 | cache_bin_alloc_impl(cache_bin_t *bin, bool *success, bool adjust_low_water) { | ||
| 349 | /* | ||
| 350 | * success (instead of ret) should be checked upon the return of this | ||
| 351 | * function. We avoid checking (ret == NULL) because there is never a | ||
| 352 | * null stored on the avail stack (which is unknown to the compiler), | ||
| 353 | * and eagerly checking ret would cause pipeline stall (waiting for the | ||
| 354 | * cacheline). | ||
| 355 | */ | ||
| 356 | |||
| 357 | /* | ||
| 358 | * This may read from the empty position; however the loaded value won't | ||
| 359 | * be used. It's safe because the stack has one more slot reserved. | ||
| 360 | */ | ||
| 361 | void *ret = *bin->stack_head; | ||
| 362 | uint16_t low_bits = (uint16_t)(uintptr_t)bin->stack_head; | ||
| 363 | void **new_head = bin->stack_head + 1; | ||
| 364 | |||
| 365 | /* | ||
| 366 | * Note that the low water mark is at most empty; if we pass this check, | ||
| 367 | * we know we're non-empty. | ||
| 368 | */ | ||
| 369 | if (likely(low_bits != bin->low_bits_low_water)) { | ||
| 370 | bin->stack_head = new_head; | ||
| 371 | *success = true; | ||
| 372 | return ret; | ||
| 373 | } | ||
| 374 | if (!adjust_low_water) { | ||
| 375 | *success = false; | ||
| 376 | return NULL; | ||
| 377 | } | ||
| 378 | /* | ||
| 379 | * In the fast-path case where we call alloc_easy and then alloc, the | ||
| 380 | * previous checking and computation is optimized away -- we didn't | ||
| 381 | * actually commit any of our operations. | ||
| 382 | */ | ||
| 383 | if (likely(low_bits != bin->low_bits_empty)) { | ||
| 384 | bin->stack_head = new_head; | ||
| 385 | bin->low_bits_low_water = (uint16_t)(uintptr_t)new_head; | ||
| 386 | *success = true; | ||
| 387 | return ret; | ||
| 388 | } | ||
| 389 | *success = false; | ||
| 390 | return NULL; | ||
| 391 | } | ||
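A toy, self-contained model of the low-water logic above (plain arrays instead of a real cache_bin_t; names like alloc_model are made up): the easy variant refuses to pop once the head reaches the low-water mark, while the full variant pops anyway and moves the mark down with the head.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of a down-growing stack of 4 slots, plus one reserved slot so
     * that reading at the empty position stays in bounds, as noted above. */
    #define NSLOTS 4
    static void *slots[NSLOTS + 1];
    static void **stack_head;                /* next item to pop */
    static void **low_water;                 /* head position at the last GC */
    static void **empty_pos;                 /* one past the last slot */

    static void *alloc_model(bool adjust_low_water, bool *success) {
        void *ret = *stack_head;
        if (stack_head != low_water) {       /* fast path: above the mark */
            stack_head++;
            *success = true;
            return ret;
        }
        if (adjust_low_water && stack_head != empty_pos) {
            stack_head++;
            low_water = stack_head;          /* dip below the old minimum */
            *success = true;
            return ret;
        }
        *success = false;
        return NULL;
    }

    int main(void) {
        static int a, b;
        empty_pos = slots + NSLOTS;
        slots[2] = &a;                        /* two cached items */
        slots[3] = &b;
        stack_head = slots + 2;
        low_water = slots + 3;                /* GC saw one item as the minimum */
        bool ok;
        alloc_model(false, &ok);              /* pops &a: ok == true */
        printf("easy #1: %d\n", ok);
        alloc_model(false, &ok);              /* at the mark: ok == false */
        printf("easy #2: %d\n", ok);
        alloc_model(true, &ok);               /* allowed to move the mark: true */
        printf("hard:    %d\n", ok);
        return 0;
    }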
| 392 | |||
| 393 | /* | ||
| 394 | * Allocate an item out of the bin, failing if we're at the low-water mark. | ||
| 395 | */ | ||
| 396 | JEMALLOC_ALWAYS_INLINE void * | ||
| 397 | cache_bin_alloc_easy(cache_bin_t *bin, bool *success) { | ||
| 398 | /* We don't look at info if we're not adjusting low-water. */ | ||
| 399 | return cache_bin_alloc_impl(bin, success, false); | ||
| 400 | } | ||
| 401 | |||
| 402 | /* | ||
| 403 | * Allocate an item out of the bin, even if we're currently at the low-water | ||
| 404 | * mark (and failing only if the bin is empty). | ||
| 405 | */ | ||
| 406 | JEMALLOC_ALWAYS_INLINE void * | ||
| 407 | cache_bin_alloc(cache_bin_t *bin, bool *success) { | ||
| 408 | return cache_bin_alloc_impl(bin, success, true); | ||
| 409 | } | ||
| 410 | |||
| 411 | JEMALLOC_ALWAYS_INLINE cache_bin_sz_t | ||
| 412 | cache_bin_alloc_batch(cache_bin_t *bin, size_t num, void **out) { | ||
| 413 | cache_bin_sz_t n = cache_bin_ncached_get_internal(bin, | ||
| 414 | /* racy */ false); | ||
| 415 | if (n > num) { | ||
| 416 | n = (cache_bin_sz_t)num; | ||
| 417 | } | ||
| 418 | memcpy(out, bin->stack_head, n * sizeof(void *)); | ||
| 419 | bin->stack_head += n; | ||
| 420 | cache_bin_low_water_adjust(bin); | ||
| 421 | |||
| 422 | return n; | ||
| 423 | } | ||
| 424 | |||
| 425 | JEMALLOC_ALWAYS_INLINE bool | ||
| 426 | cache_bin_full(cache_bin_t *bin) { | ||
| 427 | return ((uint16_t)(uintptr_t)bin->stack_head == bin->low_bits_full); | ||
| 428 | } | ||
| 429 | |||
| 430 | /* | ||
| 431 | * Free an object into the given bin. Fails only if the bin is full. | ||
| 432 | */ | ||
| 433 | JEMALLOC_ALWAYS_INLINE bool | ||
| 434 | cache_bin_dalloc_easy(cache_bin_t *bin, void *ptr) { | ||
| 435 | if (unlikely(cache_bin_full(bin))) { | ||
| 436 | return false; | ||
| 437 | } | ||
| 438 | |||
| 439 | bin->stack_head--; | ||
| 440 | *bin->stack_head = ptr; | ||
| 441 | cache_bin_assert_earlier(bin, bin->low_bits_full, | ||
| 442 | (uint16_t)(uintptr_t)bin->stack_head); | ||
| 443 | |||
| 444 | return true; | ||
| 445 | } | ||
| 446 | |||
| 447 | /* Returns false if failed to stash (i.e. bin is full). */ | ||
| 448 | JEMALLOC_ALWAYS_INLINE bool | ||
| 449 | cache_bin_stash(cache_bin_t *bin, void *ptr) { | ||
| 450 | if (cache_bin_full(bin)) { | ||
| 451 | return false; | ||
| 452 | } | ||
| 453 | |||
| 454 | /* Stash at the full position, in the [full, head) range. */ | ||
| 455 | uint16_t low_bits_head = (uint16_t)(uintptr_t)bin->stack_head; | ||
| 456 | /* Wraparound handled as well. */ | ||
| 457 | uint16_t diff = cache_bin_diff(bin, bin->low_bits_full, low_bits_head, | ||
| 458 | /* racy */ false); | ||
| 459 | *(void **)((uintptr_t)bin->stack_head - diff) = ptr; | ||
| 460 | |||
| 461 | assert(!cache_bin_full(bin)); | ||
| 462 | bin->low_bits_full += sizeof(void *); | ||
| 463 | cache_bin_assert_earlier(bin, bin->low_bits_full, low_bits_head); | ||
| 464 | |||
| 465 | return true; | ||
| 466 | } | ||
| 467 | |||
| 468 | /* | ||
| 469 | * Get the number of stashed pointers. | ||
| 470 | * | ||
| 471 | * When called from a thread not owning the TLS (i.e. racy = true), it's | ||
| 472 | * important to keep in mind that 'bin->stack_head' and 'bin->low_bits_full' can | ||
| 473 | * be modified concurrently, so almost no assertions about their values can be | ||
| 474 | * made. | ||
| 475 | */ | ||
| 476 | JEMALLOC_ALWAYS_INLINE cache_bin_sz_t | ||
| 477 | cache_bin_nstashed_get_internal(cache_bin_t *bin, cache_bin_info_t *info, | ||
| 478 | bool racy) { | ||
| 479 | cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info); | ||
| 480 | uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin, | ||
| 481 | info); | ||
| 482 | |||
| 483 | cache_bin_sz_t n = cache_bin_diff(bin, low_bits_low_bound, | ||
| 484 | bin->low_bits_full, racy) / sizeof(void *); | ||
| 485 | assert(n <= ncached_max); | ||
| 486 | |||
| 487 | if (!racy) { | ||
| 488 | /* Below are for assertions only. */ | ||
| 489 | void **low_bound = cache_bin_low_bound_get(bin, info); | ||
| 490 | |||
| 491 | assert((uint16_t)(uintptr_t)low_bound == low_bits_low_bound); | ||
| 492 | void *stashed = *(low_bound + n - 1); | ||
| 493 | bool aligned = cache_bin_nonfast_aligned(stashed); | ||
| 494 | #ifdef JEMALLOC_JET | ||
| 495 | /* Allow arbitrary pointers to be stashed in tests. */ | ||
| 496 | aligned = true; | ||
| 497 | #endif | ||
| 498 | assert(n == 0 || (stashed != NULL && aligned)); | ||
| 499 | } | ||
| 500 | |||
| 501 | return n; | ||
| 502 | } | ||
| 503 | |||
| 504 | JEMALLOC_ALWAYS_INLINE cache_bin_sz_t | ||
| 505 | cache_bin_nstashed_get_local(cache_bin_t *bin, cache_bin_info_t *info) { | ||
| 506 | cache_bin_sz_t n = cache_bin_nstashed_get_internal(bin, info, | ||
| 507 | /* racy */ false); | ||
| 508 | assert(n <= cache_bin_info_ncached_max(info)); | ||
| 509 | return n; | ||
| 510 | } | ||
| 511 | |||
| 512 | /* | ||
| 513 | * Obtain a racy view of the number of items currently in the cache bin, in the | ||
| 514 | * presence of possible concurrent modifications. | ||
| 515 | */ | ||
| 516 | static inline void | ||
| 517 | cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_info_t *info, | ||
| 518 | cache_bin_sz_t *ncached, cache_bin_sz_t *nstashed) { | ||
| 519 | cache_bin_sz_t n = cache_bin_ncached_get_internal(bin, /* racy */ true); | ||
| 520 | assert(n <= cache_bin_info_ncached_max(info)); | ||
| 521 | *ncached = n; | ||
| 522 | |||
| 523 | n = cache_bin_nstashed_get_internal(bin, info, /* racy */ true); | ||
| 524 | assert(n <= cache_bin_info_ncached_max(info)); | ||
| 525 | *nstashed = n; | ||
| 526 | /* Note that we cannot assert ncached + nstashed <= ncached_max (racy). */ | ||
| 527 | } | ||
| 528 | |||
| 529 | /* | ||
| 530 | * Filling and flushing are done in batch, on arrays of void *s. For filling, | ||
| 531 | * the arrays go forward, and can be accessed with ordinary array arithmetic. | ||
| 532 | * For flushing, we work from the end backwards, and so need to use special | ||
| 533 | * accessors that invert the usual ordering. | ||
| 534 | * | ||
| 535 | * This is important for maintaining first-fit; the arena code fills with | ||
| 536 | * earliest objects first, and so those are the ones we should return first for | ||
| 537 | * cache_bin_alloc calls. When flushing, we should flush the objects that we | ||
| 538 | * wish to return later; those at the end of the array. This is better for the | ||
| 539 | * first-fit heuristic as well as for cache locality; the most recently freed | ||
| 540 | * objects are the ones most likely to still be in cache. | ||
| 541 | * | ||
| 542 | * This all sounds very hand-wavey and theoretical, but reverting the ordering | ||
| 543 | * on one or the other pathway leads to measurable slowdowns. | ||
| 544 | */ | ||
| 545 | |||
| 546 | typedef struct cache_bin_ptr_array_s cache_bin_ptr_array_t; | ||
| 547 | struct cache_bin_ptr_array_s { | ||
| 548 | cache_bin_sz_t n; | ||
| 549 | void **ptr; | ||
| 550 | }; | ||
| 551 | |||
| 552 | /* | ||
| 553 | * Declare a cache_bin_ptr_array_t sufficient for nval items. | ||
| 554 | * | ||
| 555 | * In the current implementation, this could be just part of a | ||
| 556 | * cache_bin_ptr_array_init_... call, since we reuse the cache bin stack memory. | ||
| 557 | * Indirecting behind a macro, though, means experimenting with linked-list | ||
| 558 | * representations is easy (since they'll require an alloca in the calling | ||
| 559 | * frame). | ||
| 560 | */ | ||
| 561 | #define CACHE_BIN_PTR_ARRAY_DECLARE(name, nval) \ | ||
| 562 | cache_bin_ptr_array_t name; \ | ||
| 563 | name.n = (nval) | ||
| 564 | |||
| 565 | /* | ||
| 566 | * Start a fill. The bin must be empty, and this must be followed by a | ||
| 567 | * finish_fill call before doing any alloc/dalloc operations on the bin. | ||
| 568 | */ | ||
| 569 | static inline void | ||
| 570 | cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_info_t *info, | ||
| 571 | cache_bin_ptr_array_t *arr, cache_bin_sz_t nfill) { | ||
| 572 | cache_bin_assert_empty(bin, info); | ||
| 573 | arr->ptr = cache_bin_empty_position_get(bin) - nfill; | ||
| 574 | } | ||
| 575 | |||
| 576 | /* | ||
| 577 | * While nfill in cache_bin_init_ptr_array_for_fill is the number we *intend* to | ||
| 578 | * fill, nfilled here is the number we actually filled (which may be less, in | ||
| 579 | * case of OOM). | ||
| 580 | */ | ||
| 581 | static inline void | ||
| 582 | cache_bin_finish_fill(cache_bin_t *bin, cache_bin_info_t *info, | ||
| 583 | cache_bin_ptr_array_t *arr, cache_bin_sz_t nfilled) { | ||
| 584 | cache_bin_assert_empty(bin, info); | ||
| 585 | void **empty_position = cache_bin_empty_position_get(bin); | ||
| 586 | if (nfilled < arr->n) { | ||
| 587 | memmove(empty_position - nfilled, empty_position - arr->n, | ||
| 588 | nfilled * sizeof(void *)); | ||
| 589 | } | ||
| 590 | bin->stack_head = empty_position - nfilled; | ||
| 591 | } | ||
| 592 | |||
| 593 | /* | ||
| 594 | * Same deal, but with flush. Unlike fill (which can fail), the user must flush | ||
| 595 | * everything we give them. | ||
| 596 | */ | ||
| 597 | static inline void | ||
| 598 | cache_bin_init_ptr_array_for_flush(cache_bin_t *bin, cache_bin_info_t *info, | ||
| 599 | cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) { | ||
| 600 | arr->ptr = cache_bin_empty_position_get(bin) - nflush; | ||
| 601 | assert(cache_bin_ncached_get_local(bin, info) == 0 | ||
| 602 | || *arr->ptr != NULL); | ||
| 603 | } | ||
| 604 | |||
| 605 | static inline void | ||
| 606 | cache_bin_finish_flush(cache_bin_t *bin, cache_bin_info_t *info, | ||
| 607 | cache_bin_ptr_array_t *arr, cache_bin_sz_t nflushed) { | ||
| 608 | unsigned rem = cache_bin_ncached_get_local(bin, info) - nflushed; | ||
| 609 | memmove(bin->stack_head + nflushed, bin->stack_head, | ||
| 610 | rem * sizeof(void *)); | ||
| 611 | bin->stack_head = bin->stack_head + nflushed; | ||
| 612 | cache_bin_low_water_adjust(bin); | ||
| 613 | } | ||
| 614 | |||
| 615 | static inline void | ||
| 616 | cache_bin_init_ptr_array_for_stashed(cache_bin_t *bin, szind_t binind, | ||
| 617 | cache_bin_info_t *info, cache_bin_ptr_array_t *arr, | ||
| 618 | cache_bin_sz_t nstashed) { | ||
| 619 | assert(nstashed > 0); | ||
| 620 | assert(cache_bin_nstashed_get_local(bin, info) == nstashed); | ||
| 621 | |||
| 622 | void **low_bound = cache_bin_low_bound_get(bin, info); | ||
| 623 | arr->ptr = low_bound; | ||
| 624 | assert(*arr->ptr != NULL); | ||
| 625 | } | ||
| 626 | |||
| 627 | static inline void | ||
| 628 | cache_bin_finish_flush_stashed(cache_bin_t *bin, cache_bin_info_t *info) { | ||
| 629 | void **low_bound = cache_bin_low_bound_get(bin, info); | ||
| 630 | |||
| 631 | /* Reset the bin local full position. */ | ||
| 632 | bin->low_bits_full = (uint16_t)(uintptr_t)low_bound; | ||
| 633 | assert(cache_bin_nstashed_get_local(bin, info) == 0); | ||
| 634 | } | ||
| 635 | |||
| 636 | /* | ||
| 637 | * Initialize a cache_bin_info to represent up to the given number of items in | ||
| 638 | * the cache_bins it is associated with. | ||
| 639 | */ | ||
| 640 | void cache_bin_info_init(cache_bin_info_t *bin_info, | ||
| 641 | cache_bin_sz_t ncached_max); | ||
| 642 | /* | ||
| 643 | * Given an array of initialized cache_bin_info_ts, determine how big an | ||
| 644 | * allocation is required to initialize a full set of cache_bin_ts. | ||
| 645 | */ | ||
| 646 | void cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos, | ||
| 647 | size_t *size, size_t *alignment); | ||
| 648 | |||
| 649 | /* | ||
| 650 | * Actually initialize some cache bins. Callers should allocate the backing | ||
| 651 | * memory indicated by a call to cache_bin_info_compute_alloc. They should then | ||
| 652 | * preincrement, call init once for each bin and info, and then call | ||
| 653 | * cache_bin_postincrement. *alloc_cur will then point immediately past the end | ||
| 654 | * of the allocation. | ||
| 655 | */ | ||
| 656 | void cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, | ||
| 657 | void *alloc, size_t *cur_offset); | ||
| 658 | void cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos, | ||
| 659 | void *alloc, size_t *cur_offset); | ||
| 660 | void cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc, | ||
| 661 | size_t *cur_offset); | ||
| 662 | |||
| 663 | /* | ||
| 664 | * If a cache bin was zero initialized (either because it lives in static or | ||
| 665 | * thread-local storage, or was memset to 0), this function indicates whether or | ||
| 666 | * not cache_bin_init was called on it. | ||
| 667 | */ | ||
| 668 | bool cache_bin_still_zero_initialized(cache_bin_t *bin); | ||
| 669 | |||
| 670 | #endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ckh.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ckh.h deleted file mode 100644 index 7b3850b..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ckh.h +++ /dev/null | |||
| @@ -1,101 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_CKH_H | ||
| 2 | #define JEMALLOC_INTERNAL_CKH_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/tsd.h" | ||
| 5 | |||
| 6 | /* Cuckoo hashing implementation. Skip to the end for the interface. */ | ||
| 7 | |||
| 8 | /******************************************************************************/ | ||
| 9 | /* INTERNAL DEFINITIONS -- IGNORE */ | ||
| 10 | /******************************************************************************/ | ||
| 11 | |||
| 12 | /* Maintain counters used to get an idea of performance. */ | ||
| 13 | /* #define CKH_COUNT */ | ||
| 14 | /* Print counter values in ckh_delete() (requires CKH_COUNT). */ | ||
| 15 | /* #define CKH_VERBOSE */ | ||
| 16 | |||
| 17 | /* | ||
| 18 | * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit | ||
| 19 | * one bucket per L1 cache line. | ||
| 20 | */ | ||
| 21 | #define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) | ||
| 22 | |||
| 23 | /* Typedefs to allow easy function pointer passing. */ | ||
| 24 | typedef void ckh_hash_t (const void *, size_t[2]); | ||
| 25 | typedef bool ckh_keycomp_t (const void *, const void *); | ||
| 26 | |||
| 27 | /* Hash table cell. */ | ||
| 28 | typedef struct { | ||
| 29 | const void *key; | ||
| 30 | const void *data; | ||
| 31 | } ckhc_t; | ||
| 32 | |||
| 33 | /* The hash table itself. */ | ||
| 34 | typedef struct { | ||
| 35 | #ifdef CKH_COUNT | ||
| 36 | /* Counters used to get an idea of performance. */ | ||
| 37 | uint64_t ngrows; | ||
| 38 | uint64_t nshrinks; | ||
| 39 | uint64_t nshrinkfails; | ||
| 40 | uint64_t ninserts; | ||
| 41 | uint64_t nrelocs; | ||
| 42 | #endif | ||
| 43 | |||
| 44 | /* Used for pseudo-random number generation. */ | ||
| 45 | uint64_t prng_state; | ||
| 46 | |||
| 47 | /* Total number of items. */ | ||
| 48 | size_t count; | ||
| 49 | |||
| 50 | /* | ||
| 51 | * Minimum and current number of hash table buckets. There are | ||
| 52 | * 2^LG_CKH_BUCKET_CELLS cells per bucket. | ||
| 53 | */ | ||
| 54 | unsigned lg_minbuckets; | ||
| 55 | unsigned lg_curbuckets; | ||
| 56 | |||
| 57 | /* Hash and comparison functions. */ | ||
| 58 | ckh_hash_t *hash; | ||
| 59 | ckh_keycomp_t *keycomp; | ||
| 60 | |||
| 61 | /* Hash table with 2^lg_curbuckets buckets. */ | ||
| 62 | ckhc_t *tab; | ||
| 63 | } ckh_t; | ||
| 64 | |||
| 65 | /******************************************************************************/ | ||
| 66 | /* BEGIN PUBLIC API */ | ||
| 67 | /******************************************************************************/ | ||
| 68 | |||
| 69 | /* Lifetime management. Minitems is the initial capacity. */ | ||
| 70 | bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, | ||
| 71 | ckh_keycomp_t *keycomp); | ||
| 72 | void ckh_delete(tsd_t *tsd, ckh_t *ckh); | ||
| 73 | |||
| 74 | /* Get the number of elements in the set. */ | ||
| 75 | size_t ckh_count(ckh_t *ckh); | ||
| 76 | |||
| 77 | /* | ||
| 78 | * To iterate over the elements in the table, initialize *tabind to 0 and call | ||
| 79 | * this function until it returns true. Each call that returns false will | ||
| 80 | * update *key and *data to the next element in the table, assuming the pointers | ||
| 81 | * are non-NULL. | ||
| 82 | */ | ||
| 83 | bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); | ||
| 84 | |||
| 85 | /* | ||
| 86 | * Basic hash table operations -- insert, removal, lookup. For ckh_remove and | ||
| 87 | * ckh_search, key or data can be NULL. The hash-table only stores pointers to | ||
| 88 | * the key and value, and doesn't do any lifetime management. | ||
| 89 | */ | ||
| 90 | bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); | ||
| 91 | bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, | ||
| 92 | void **data); | ||
| 93 | bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); | ||
| 94 | |||
| 95 | /* Some useful hash and comparison functions for strings and pointers. */ | ||
| 96 | void ckh_string_hash(const void *key, size_t r_hash[2]); | ||
| 97 | bool ckh_string_keycomp(const void *k1, const void *k2); | ||
| 98 | void ckh_pointer_hash(const void *key, size_t r_hash[2]); | ||
| 99 | bool ckh_pointer_keycomp(const void *k1, const void *k2); | ||
| 100 | |||
| 101 | #endif /* JEMALLOC_INTERNAL_CKH_H */ | ||
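For readers unfamiliar with cuckoo hashing, a toy standalone sketch of the displacement idea behind this table: every key has two candidate positions, and an insert that finds both occupied evicts the current occupant and re-places it at its alternate position. The real ckh_t differs substantially (multi-cell buckets sized to a cacheline, 64-bit hash pairs, growth on failure), so this is only an illustration with made-up hash functions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy cuckoo table: 8 single-key cells, two candidate positions per key. */
    #define NCELLS 8
    static int cells[NCELLS];        /* 0 means empty; keys must be nonzero */

    static unsigned h1(int key) { return (unsigned)key % NCELLS; }
    static unsigned h2(int key) { return (unsigned)(key / NCELLS) % NCELLS; }

    /* Insert by displacing whatever occupies the slot, bounded to avoid cycles. */
    static bool cuckoo_insert(int key) {
        unsigned pos = h1(key);
        for (int tries = 0; tries < 2 * NCELLS; tries++) {
            if (cells[pos] == 0) {
                cells[pos] = key;
                return true;
            }
            int evicted = cells[pos];
            cells[pos] = key;
            key = evicted;
            /* Move the evicted key to its alternate position. */
            pos = (h1(key) == pos) ? h2(key) : h1(key);
        }
        return false;                /* would trigger a grow/rehash in real code */
    }

    int main(void) {
        int keys[] = {3, 11, 19, 5}; /* 3, 11 and 19 all collide at h1 == 3 */
        for (unsigned i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
            printf("insert %d: %s\n", keys[i],
                cuckoo_insert(keys[i]) ? "ok" : "needs grow");
        }
        return 0;
    }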
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/counter.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/counter.h deleted file mode 100644 index 79abf06..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/counter.h +++ /dev/null | |||
| @@ -1,34 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_COUNTER_H | ||
| 2 | #define JEMALLOC_INTERNAL_COUNTER_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/mutex.h" | ||
| 5 | |||
| 6 | typedef struct counter_accum_s { | ||
| 7 | LOCKEDINT_MTX_DECLARE(mtx) | ||
| 8 | locked_u64_t accumbytes; | ||
| 9 | uint64_t interval; | ||
| 10 | } counter_accum_t; | ||
| 11 | |||
| 12 | JEMALLOC_ALWAYS_INLINE bool | ||
| 13 | counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) { | ||
| 14 | uint64_t interval = counter->interval; | ||
| 15 | assert(interval > 0); | ||
| 16 | LOCKEDINT_MTX_LOCK(tsdn, counter->mtx); | ||
| 17 | /* | ||
| 18 | * If the event moves fast enough (and/or if the event handling is slow | ||
| 19 | * enough), extreme overflow can cause counter trigger coalescing. | ||
| 20 | * This is an intentional mechanism that avoids rate-limiting | ||
| 21 | * allocation. | ||
| 22 | */ | ||
| 23 | bool overflow = locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(counter->mtx), | ||
| 24 | &counter->accumbytes, bytes, interval); | ||
| 25 | LOCKEDINT_MTX_UNLOCK(tsdn, counter->mtx); | ||
| 26 | return overflow; | ||
| 27 | } | ||
| 28 | |||
| 29 | bool counter_accum_init(counter_accum_t *counter, uint64_t interval); | ||
| 30 | void counter_prefork(tsdn_t *tsdn, counter_accum_t *counter); | ||
| 31 | void counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter); | ||
| 32 | void counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter); | ||
| 33 | |||
| 34 | #endif /* JEMALLOC_INTERNAL_COUNTER_H */ | ||
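A single-threaded toy model of the interval behavior described above: bytes accumulate modulo the interval, and the accumulator reports a trigger whenever the running total crosses it. The real counter performs the same update under LOCKEDINT_MTX, and a very large input can coalesce several intervals' worth of activity into one trigger, as the comment notes.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t accumbytes;
        uint64_t interval;
    } toy_counter_t;

    static bool toy_accum(toy_counter_t *c, uint64_t bytes) {
        c->accumbytes += bytes;
        if (c->accumbytes >= c->interval) {
            c->accumbytes %= c->interval;   /* keep the remainder, as mod does */
            return true;                    /* time to run the periodic event */
        }
        return false;
    }

    int main(void) {
        toy_counter_t c = {0, 1024};
        uint64_t sizes[] = {400, 400, 400, 3000};
        for (unsigned i = 0; i < 4; i++) {
            printf("accum %4llu -> %s\n", (unsigned long long)sizes[i],
                toy_accum(&c, sizes[i]) ? "trigger" : "-");
        }
        /* The last input covers more than two intervals but yields one trigger:
         * that is the coalescing behavior the comment above describes. */
        return 0;
    }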
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ctl.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ctl.h deleted file mode 100644 index 63d27f8..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ctl.h +++ /dev/null | |||
| @@ -1,159 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_CTL_H | ||
| 2 | #define JEMALLOC_INTERNAL_CTL_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/jemalloc_internal_types.h" | ||
| 5 | #include "jemalloc/internal/malloc_io.h" | ||
| 6 | #include "jemalloc/internal/mutex_prof.h" | ||
| 7 | #include "jemalloc/internal/ql.h" | ||
| 8 | #include "jemalloc/internal/sc.h" | ||
| 9 | #include "jemalloc/internal/stats.h" | ||
| 10 | |||
| 11 | /* Maximum ctl tree depth. */ | ||
| 12 | #define CTL_MAX_DEPTH 7 | ||
| 13 | |||
| 14 | typedef struct ctl_node_s { | ||
| 15 | bool named; | ||
| 16 | } ctl_node_t; | ||
| 17 | |||
| 18 | typedef struct ctl_named_node_s { | ||
| 19 | ctl_node_t node; | ||
| 20 | const char *name; | ||
| 21 | /* If (nchildren == 0), this is a terminal node. */ | ||
| 22 | size_t nchildren; | ||
| 23 | const ctl_node_t *children; | ||
| 24 | int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *, | ||
| 25 | size_t); | ||
| 26 | } ctl_named_node_t; | ||
| 27 | |||
| 28 | typedef struct ctl_indexed_node_s { | ||
| 29 | struct ctl_node_s node; | ||
| 30 | const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, | ||
| 31 | size_t); | ||
| 32 | } ctl_indexed_node_t; | ||
| 33 | |||
| 34 | typedef struct ctl_arena_stats_s { | ||
| 35 | arena_stats_t astats; | ||
| 36 | |||
| 37 | /* Aggregate stats for small size classes, based on bin stats. */ | ||
| 38 | size_t allocated_small; | ||
| 39 | uint64_t nmalloc_small; | ||
| 40 | uint64_t ndalloc_small; | ||
| 41 | uint64_t nrequests_small; | ||
| 42 | uint64_t nfills_small; | ||
| 43 | uint64_t nflushes_small; | ||
| 44 | |||
| 45 | bin_stats_data_t bstats[SC_NBINS]; | ||
| 46 | arena_stats_large_t lstats[SC_NSIZES - SC_NBINS]; | ||
| 47 | pac_estats_t estats[SC_NPSIZES]; | ||
| 48 | hpa_shard_stats_t hpastats; | ||
| 49 | sec_stats_t secstats; | ||
| 50 | } ctl_arena_stats_t; | ||
| 51 | |||
| 52 | typedef struct ctl_stats_s { | ||
| 53 | size_t allocated; | ||
| 54 | size_t active; | ||
| 55 | size_t metadata; | ||
| 56 | size_t metadata_thp; | ||
| 57 | size_t resident; | ||
| 58 | size_t mapped; | ||
| 59 | size_t retained; | ||
| 60 | |||
| 61 | background_thread_stats_t background_thread; | ||
| 62 | mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes]; | ||
| 63 | } ctl_stats_t; | ||
| 64 | |||
| 65 | typedef struct ctl_arena_s ctl_arena_t; | ||
| 66 | struct ctl_arena_s { | ||
| 67 | unsigned arena_ind; | ||
| 68 | bool initialized; | ||
| 69 | ql_elm(ctl_arena_t) destroyed_link; | ||
| 70 | |||
| 71 | /* Basic stats, supported even if !config_stats. */ | ||
| 72 | unsigned nthreads; | ||
| 73 | const char *dss; | ||
| 74 | ssize_t dirty_decay_ms; | ||
| 75 | ssize_t muzzy_decay_ms; | ||
| 76 | size_t pactive; | ||
| 77 | size_t pdirty; | ||
| 78 | size_t pmuzzy; | ||
| 79 | |||
| 80 | /* NULL if !config_stats. */ | ||
| 81 | ctl_arena_stats_t *astats; | ||
| 82 | }; | ||
| 83 | |||
| 84 | typedef struct ctl_arenas_s { | ||
| 85 | uint64_t epoch; | ||
| 86 | unsigned narenas; | ||
| 87 | ql_head(ctl_arena_t) destroyed; | ||
| 88 | |||
| 89 | /* | ||
| 90 | * Element 0 corresponds to merged stats for extant arenas (accessed via | ||
| 91 | * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for | ||
| 92 | * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the | ||
| 93 | * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas. | ||
| 94 | */ | ||
| 95 | ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT]; | ||
| 96 | } ctl_arenas_t; | ||
| 97 | |||
| 98 | int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, | ||
| 99 | void *newp, size_t newlen); | ||
| 100 | int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp); | ||
| 101 | int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, | ||
| 102 | size_t *oldlenp, void *newp, size_t newlen); | ||
| 103 | int ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name, | ||
| 104 | size_t *miblenp); | ||
| 105 | int ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name, | ||
| 106 | size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen); | ||
| 107 | bool ctl_boot(void); | ||
| 108 | void ctl_prefork(tsdn_t *tsdn); | ||
| 109 | void ctl_postfork_parent(tsdn_t *tsdn); | ||
| 110 | void ctl_postfork_child(tsdn_t *tsdn); | ||
| 111 | void ctl_mtx_assert_held(tsdn_t *tsdn); | ||
| 112 | |||
| 113 | #define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ | ||
| 114 | if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ | ||
| 115 | != 0) { \ | ||
| 116 | malloc_printf( \ | ||
| 117 | "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \ | ||
| 118 | name); \ | ||
| 119 | abort(); \ | ||
| 120 | } \ | ||
| 121 | } while (0) | ||
| 122 | |||
| 123 | #define xmallctlnametomib(name, mibp, miblenp) do { \ | ||
| 124 | if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ | ||
| 125 | malloc_printf("<jemalloc>: Failure in " \ | ||
| 126 | "xmallctlnametomib(\"%s\", ...)\n", name); \ | ||
| 127 | abort(); \ | ||
| 128 | } \ | ||
| 129 | } while (0) | ||
| 130 | |||
| 131 | #define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ | ||
| 132 | if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ | ||
| 133 | newlen) != 0) { \ | ||
| 134 | malloc_write( \ | ||
| 135 | "<jemalloc>: Failure in xmallctlbymib()\n"); \ | ||
| 136 | abort(); \ | ||
| 137 | } \ | ||
| 138 | } while (0) | ||
| 139 | |||
| 140 | #define xmallctlmibnametomib(mib, miblen, name, miblenp) do { \ | ||
| 141 | if (ctl_mibnametomib(tsd_fetch(), mib, miblen, name, miblenp) \ | ||
| 142 | != 0) { \ | ||
| 143 | malloc_write( \ | ||
| 144 | "<jemalloc>: Failure in ctl_mibnametomib()\n"); \ | ||
| 145 | abort(); \ | ||
| 146 | } \ | ||
| 147 | } while (0) | ||
| 148 | |||
| 149 | #define xmallctlbymibname(mib, miblen, name, miblenp, oldp, oldlenp, \ | ||
| 150 | newp, newlen) do { \ | ||
| 151 | if (ctl_bymibname(tsd_fetch(), mib, miblen, name, miblenp, \ | ||
| 152 | oldp, oldlenp, newp, newlen) != 0) { \ | ||
| 153 | malloc_write( \ | ||
| 154 | "<jemalloc>: Failure in ctl_bymibname()\n"); \ | ||
| 155 | abort(); \ | ||
| 156 | } \ | ||
| 157 | } while (0) | ||
| 158 | |||
| 159 | #endif /* JEMALLOC_INTERNAL_CTL_H */ | ||
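
The ctl tree above is what backs jemalloc's public mallctl() namespace, and the xmallctl* macros are simply abort-on-failure wrappers around those entry points for internal use. For context, this is how the same machinery is normally driven from application code through the public API: refresh the stats "epoch", read "stats.allocated" by name, and cache a MIB with mallctlnametomib() when the same node will be queried repeatedly. (Depending on the build configuration, the public symbols may carry the je_ prefix that the macros above use.)

    #include <stdio.h>
    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* Refresh cached statistics before reading them. */
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        if (mallctl("epoch", &epoch, &sz, &epoch, sz) != 0) {
            fprintf(stderr, "epoch refresh failed\n");
            return 1;
        }

        /* Simple by-name query. */
        size_t allocated;
        sz = sizeof(allocated);
        if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
            printf("allocated: %zu bytes\n", allocated);
        }

        /* Translate the name to a MIB once, then query it cheaply by MIB. */
        size_t mib[3];
        size_t miblen = sizeof(mib) / sizeof(mib[0]);
        if (mallctlnametomib("stats.resident", mib, &miblen) == 0) {
            size_t resident;
            sz = sizeof(resident);
            if (mallctlbymib(mib, miblen, &resident, &sz, NULL, 0) == 0) {
                printf("resident: %zu bytes\n", resident);
            }
        }
        return 0;
    }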
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/decay.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/decay.h deleted file mode 100644 index cf6a9d2..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/decay.h +++ /dev/null | |||
| @@ -1,186 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_DECAY_H | ||
| 2 | #define JEMALLOC_INTERNAL_DECAY_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/smoothstep.h" | ||
| 5 | |||
| 6 | #define DECAY_UNBOUNDED_TIME_TO_PURGE ((uint64_t)-1) | ||
| 7 | |||
| 8 | /* | ||
| 9 | * The decay_t computes the number of pages we should purge at any given time. | ||
| 10 | * Page allocators inform a decay object when pages enter a decay-able state | ||
| 11 | * (i.e. dirty or muzzy), and query it to determine how many pages should be | ||
| 12 | * purged at any given time. | ||
| 13 | * | ||
| 14 | * This is mostly a single-threaded data structure and doesn't care about | ||
| 15 | * synchronization at all; callers are responsible for managing their own | ||
| 16 | * synchronization. There are two exceptions: | ||
| 17 | * 1) It's OK to racily call decay_ms_read (i.e. just the simplest state query). | ||
| 18 | * 2) The mtx and purging fields live (and are initialized) here, but are | ||
| 19 | * logically owned by the page allocator. This is just a convenience (since | ||
| 20 | * those fields would be duplicated for both the dirty and muzzy states | ||
| 21 | * otherwise). | ||
| 22 | */ | ||
| 23 | typedef struct decay_s decay_t; | ||
| 24 | struct decay_s { | ||
| 25 | /* Synchronizes all non-atomic fields. */ | ||
| 26 | malloc_mutex_t mtx; | ||
| 27 | /* | ||
| 28 | * True if a thread is currently purging the extents associated with | ||
| 29 | * this decay structure. | ||
| 30 | */ | ||
| 31 | bool purging; | ||
| 32 | /* | ||
| 33 | * Approximate time in milliseconds from the creation of a set of unused | ||
| 34 | * dirty pages until an equivalent set of unused dirty pages is purged | ||
| 35 | * and/or reused. | ||
| 36 | */ | ||
| 37 | atomic_zd_t time_ms; | ||
| 38 | /* time / SMOOTHSTEP_NSTEPS. */ | ||
| 39 | nstime_t interval; | ||
| 40 | /* | ||
| 41 | * Time at which the current decay interval logically started. We do | ||
| 42 | * not actually advance to a new epoch until sometime after it starts | ||
| 43 | * because of scheduling and computation delays, and it is even possible | ||
| 44 | * to completely skip epochs. In all cases, during epoch advancement we | ||
| 45 | * merge all relevant activity into the most recently recorded epoch. | ||
| 46 | */ | ||
| 47 | nstime_t epoch; | ||
| 48 | /* Deadline randomness generator. */ | ||
| 49 | uint64_t jitter_state; | ||
| 50 | /* | ||
| 51 | * Deadline for current epoch. This is the sum of interval and per | ||
| 52 | * epoch jitter which is a uniform random variable in [0..interval). | ||
| 53 | * Epochs always advance by precise multiples of interval, but we | ||
| 54 | * randomize the deadline to reduce the likelihood of arenas purging in | ||
| 55 | * lockstep. | ||
| 56 | */ | ||
| 57 | nstime_t deadline; | ||
| 58 | /* | ||
| 59 | * The number of pages we cap ourselves at in the current epoch, per | ||
| 60 | * decay policies. Updated on an epoch change. After an epoch change, | ||
| 61 | * the caller should take steps to try to purge down to this amount. | ||
| 62 | */ | ||
| 63 | size_t npages_limit; | ||
| 64 | /* | ||
| 65 | * Number of unpurged pages at beginning of current epoch. During epoch | ||
| 66 | * advancement we use the delta between arena->decay_*.nunpurged and | ||
| 67 | * ecache_npages_get(&arena->ecache_*) to determine how many dirty pages, | ||
| 68 | * if any, were generated. | ||
| 69 | */ | ||
| 70 | size_t nunpurged; | ||
| 71 | /* | ||
| 72 | * Trailing log of how many unused dirty pages were generated during | ||
| 73 | * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last | ||
| 74 | * element is the most recent epoch. Corresponding epoch times are | ||
| 75 | * relative to epoch. | ||
| 76 | * | ||
| 77 | * Updated only on epoch advance, triggered by | ||
| 78 | * decay_maybe_advance_epoch, below. | ||
| 79 | */ | ||
| 80 | size_t backlog[SMOOTHSTEP_NSTEPS]; | ||
| 81 | |||
| 82 | /* Peak number of pages in associated extents. Used for debug only. */ | ||
| 83 | uint64_t ceil_npages; | ||
| 84 | }; | ||
| 85 | |||
| 86 | /* | ||
| 87 | * The current decay time setting. This is the only public access to a decay_t | ||
| 88 | * that's allowed without holding mtx. | ||
| 89 | */ | ||
| 90 | static inline ssize_t | ||
| 91 | decay_ms_read(const decay_t *decay) { | ||
| 92 | return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); | ||
| 93 | } | ||
| 94 | |||
| 95 | /* | ||
| 96 | * See the comment on the struct field -- the limit on pages we should allow in | ||
| 97 | * this decay state this epoch. | ||
| 98 | */ | ||
| 99 | static inline size_t | ||
| 100 | decay_npages_limit_get(const decay_t *decay) { | ||
| 101 | return decay->npages_limit; | ||
| 102 | } | ||
| 103 | |||
| 104 | /* How many unused dirty pages were generated during the last epoch. */ | ||
| 105 | static inline size_t | ||
| 106 | decay_epoch_npages_delta(const decay_t *decay) { | ||
| 107 | return decay->backlog[SMOOTHSTEP_NSTEPS - 1]; | ||
| 108 | } | ||
| 109 | |||
| 110 | /* | ||
| 111 | * Current epoch duration, in nanoseconds. Given that new epochs are started | ||
| 112 | * somewhat haphazardly, this is not necessarily exactly the time between any | ||
| 113 | * two calls to decay_maybe_advance_epoch; see the comments on fields in the | ||
| 114 | * decay_t. | ||
| 115 | */ | ||
| 116 | static inline uint64_t | ||
| 117 | decay_epoch_duration_ns(const decay_t *decay) { | ||
| 118 | return nstime_ns(&decay->interval); | ||
| 119 | } | ||
| 120 | |||
| 121 | static inline bool | ||
| 122 | decay_immediately(const decay_t *decay) { | ||
| 123 | ssize_t decay_ms = decay_ms_read(decay); | ||
| 124 | return decay_ms == 0; | ||
| 125 | } | ||
| 126 | |||
| 127 | static inline bool | ||
| 128 | decay_disabled(const decay_t *decay) { | ||
| 129 | ssize_t decay_ms = decay_ms_read(decay); | ||
| 130 | return decay_ms < 0; | ||
| 131 | } | ||
| 132 | |||
| 133 | /* Returns true if decay is enabled and done gradually. */ | ||
| 134 | static inline bool | ||
| 135 | decay_gradually(const decay_t *decay) { | ||
| 136 | ssize_t decay_ms = decay_ms_read(decay); | ||
| 137 | return decay_ms > 0; | ||
| 138 | } | ||
| 139 | |||
| 140 | /* | ||
| 141 | * Returns true if the passed in decay time setting is valid. | ||
| 142 | * < -1 : invalid | ||
| 143 | * -1 : never decay | ||
| 144 | * 0 : decay immediately | ||
| 145 | * > 0 : some positive decay time, up to a maximum allowed value of | ||
| 146 | * NSTIME_SEC_MAX * 1000, which corresponds to decaying somewhere in the early | ||
| 147 | * 27th century. By that time, we expect to have implemented alternate purging | ||
| 148 | * strategies. | ||
| 149 | */ | ||
| 150 | bool decay_ms_valid(ssize_t decay_ms); | ||
| 151 | |||
| 152 | /* | ||
| 153 | * As a precondition, the decay_t must be zeroed out (as if with memset). | ||
| 154 | * | ||
| 155 | * Returns true on error. | ||
| 156 | */ | ||
| 157 | bool decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms); | ||
| 158 | |||
| 159 | /* | ||
| 160 | * Given an already-initialized decay_t, reinitialize it with the given decay | ||
| 161 | * time. The decay_t must have previously been initialized (and should not then | ||
| 162 | * be zeroed). | ||
| 163 | */ | ||
| 164 | void decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms); | ||
| 165 | |||
| 166 | /* | ||
| 167 | * Compute how many of 'npages_new' pages we would need to purge in 'time'. | ||
| 168 | */ | ||
| 169 | uint64_t decay_npages_purge_in(decay_t *decay, nstime_t *time, | ||
| 170 | size_t npages_new); | ||
| 171 | |||
| 172 | /* Returns true if the epoch advanced and there are pages to purge. */ | ||
| 173 | bool decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time, | ||
| 174 | size_t current_npages); | ||
| 175 | |||
| 176 | /* | ||
| 177 | * Calculates wait time until a number of pages in the interval | ||
| 178 | * [0.5 * npages_threshold .. 1.5 * npages_threshold] should be purged. | ||
| 179 | * | ||
| 180 | * Returns number of nanoseconds or DECAY_UNBOUNDED_TIME_TO_PURGE in case of | ||
| 181 | * indefinite wait. | ||
| 182 | */ | ||
| 183 | uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current, | ||
| 184 | uint64_t npages_threshold); | ||
| 185 | |||
| 186 | #endif /* JEMALLOC_INTERNAL_DECAY_H */ | ||
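
The decay_ms setting documented above has three regimes: -1 disables purging, 0 purges immediately, and a positive value spreads purging over SMOOTHSTEP_NSTEPS epochs, each decay_ms / SMOOTHSTEP_NSTEPS milliseconds long. A small standalone sketch of just that classification and the epoch-interval arithmetic follows; the TOY_* constant is an illustrative stand-in, and the real validation additionally caps decay_ms at NSTIME_SEC_MAX * 1000.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for jemalloc's SMOOTHSTEP_NSTEPS. */
    #define TOY_SMOOTHSTEP_NSTEPS 200

    static bool toy_decay_ms_valid(int64_t decay_ms) {
        return decay_ms >= -1;  /* anything below -1 is rejected */
    }

    static const char *toy_decay_mode(int64_t decay_ms) {
        if (decay_ms < 0) return "disabled (never purge)";
        if (decay_ms == 0) return "immediate (purge right away)";
        return "gradual (purge over the decay horizon)";
    }

    /* Per-epoch interval in ns: the decay time divided into NSTEPS epochs. */
    static uint64_t toy_epoch_interval_ns(int64_t decay_ms) {
        return (uint64_t)decay_ms * 1000000ULL / TOY_SMOOTHSTEP_NSTEPS;
    }

    int main(void) {
        int64_t settings[] = {-2, -1, 0, 10000};
        for (size_t i = 0; i < sizeof(settings) / sizeof(settings[0]); i++) {
            int64_t ms = settings[i];
            if (!toy_decay_ms_valid(ms)) {
                printf("%lld: invalid\n", (long long)ms);
                continue;
            }
            printf("%lld: %s\n", (long long)ms, toy_decay_mode(ms));
            if (ms > 0) {
                printf("  epoch interval: %llu ns\n",
                    (unsigned long long)toy_epoch_interval_ns(ms));
            }
        }
        return 0;
    }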
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/div.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/div.h deleted file mode 100644 index aebae93..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/div.h +++ /dev/null | |||
| @@ -1,41 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_DIV_H | ||
| 2 | #define JEMALLOC_INTERNAL_DIV_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/assert.h" | ||
| 5 | |||
| 6 | /* | ||
| 7 | * This module does the division that computes the index of a region in a slab, | ||
| 8 | * given its offset relative to the base. | ||
| 9 | * That is, given a divisor d and an n = i * d (all integers), we'll return i. | ||
| 10 | * We do some pre-computation to do this more quickly than a CPU division | ||
| 11 | * instruction. | ||
| 12 | * We bound n < 2^32, and don't support dividing by one. | ||
| 13 | */ | ||
| 14 | |||
| 15 | typedef struct div_info_s div_info_t; | ||
| 16 | struct div_info_s { | ||
| 17 | uint32_t magic; | ||
| 18 | #ifdef JEMALLOC_DEBUG | ||
| 19 | size_t d; | ||
| 20 | #endif | ||
| 21 | }; | ||
| 22 | |||
| 23 | void div_init(div_info_t *div_info, size_t divisor); | ||
| 24 | |||
| 25 | static inline size_t | ||
| 26 | div_compute(div_info_t *div_info, size_t n) { | ||
| 27 | assert(n <= (uint32_t)-1); | ||
| 28 | /* | ||
| 29 | * This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine, | ||
| 30 | * the compilers I tried were all smart enough to turn this into the | ||
| 31 | * appropriate "get the high 32 bits of the result of a multiply" (e.g. | ||
| 32 | * mul; mov edx eax; on x86, umull on arm, etc.). | ||
| 33 | */ | ||
| 34 | size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32; | ||
| 35 | #ifdef JEMALLOC_DEBUG | ||
| 36 | assert(i * div_info->d == n); | ||
| 37 | #endif | ||
| 38 | return i; | ||
| 39 | } | ||
| 40 | |||
| 41 | #endif /* JEMALLOC_INTERNAL_DIV_H */ | ||
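
div_compute above replaces a division by a precomputed "magic" reciprocal: for a fixed divisor d, magic = ceil(2^32 / d), and for any n = i * d with n < 2^32 the high 32 bits of n * magic recover i exactly. Below is a standalone sketch of that precomputation and its use, mirroring the header's constraints (d > 1, n an exact multiple of d, n < 2^32); the toy_* names are illustrative.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint32_t magic;
        uint32_t d;  /* kept only so the sketch can self-check, as in debug builds */
    } toy_div_t;

    static void toy_div_init(toy_div_t *div, uint32_t d) {
        assert(d > 1);  /* d == 1 would need magic == 2^32, which doesn't fit */
        uint64_t two_to_32 = (uint64_t)1 << 32;
        uint32_t magic = (uint32_t)(two_to_32 / d);
        if (two_to_32 % d != 0) {
            magic++;  /* round up: magic = ceil(2^32 / d) */
        }
        div->magic = magic;
        div->d = d;
    }

    /* Compute n / d via a multiply and a shift; exact when n is a multiple of d. */
    static uint32_t toy_div_compute(const toy_div_t *div, uint32_t n) {
        uint32_t i = (uint32_t)(((uint64_t)n * div->magic) >> 32);
        assert((uint64_t)i * div->d == n);
        return i;
    }

    int main(void) {
        toy_div_t div;
        toy_div_init(&div, 48);  /* e.g. a region size within a slab */
        for (uint32_t i = 0; i < 100000; i++) {
            assert(toy_div_compute(&div, i * 48) == i);
        }
        printf("4800 / 48 = %u\n", toy_div_compute(&div, 4800));
        return 0;
    }

The exactness argument: magic * d = 2^32 + e with 0 <= e < d, so n * magic = i * 2^32 + i * e, and since i * e < 2^32 the shift by 32 discards it, leaving exactly i.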
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ecache.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ecache.h deleted file mode 100644 index 71cae3e..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ecache.h +++ /dev/null | |||
| @@ -1,55 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_ECACHE_H | ||
| 2 | #define JEMALLOC_INTERNAL_ECACHE_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/eset.h" | ||
| 5 | #include "jemalloc/internal/san.h" | ||
| 6 | #include "jemalloc/internal/mutex.h" | ||
| 7 | |||
| 8 | typedef struct ecache_s ecache_t; | ||
| 9 | struct ecache_s { | ||
| 10 | malloc_mutex_t mtx; | ||
| 11 | eset_t eset; | ||
| 12 | eset_t guarded_eset; | ||
| 13 | /* All stored extents must be in the same state. */ | ||
| 14 | extent_state_t state; | ||
| 15 | /* The index of the ehooks the ecache is associated with. */ | ||
| 16 | unsigned ind; | ||
| 17 | /* | ||
| 18 | * If true, delay coalescing until eviction; otherwise coalesce during | ||
| 19 | * deallocation. | ||
| 20 | */ | ||
| 21 | bool delay_coalesce; | ||
| 22 | }; | ||
| 23 | |||
| 24 | static inline size_t | ||
| 25 | ecache_npages_get(ecache_t *ecache) { | ||
| 26 | return eset_npages_get(&ecache->eset) + | ||
| 27 | eset_npages_get(&ecache->guarded_eset); | ||
| 28 | } | ||
| 29 | |||
| 30 | /* Get the number of extents in the given page size index. */ | ||
| 31 | static inline size_t | ||
| 32 | ecache_nextents_get(ecache_t *ecache, pszind_t ind) { | ||
| 33 | return eset_nextents_get(&ecache->eset, ind) + | ||
| 34 | eset_nextents_get(&ecache->guarded_eset, ind); | ||
| 35 | } | ||
| 36 | |||
| 37 | /* Get the sum total bytes of the extents in the given page size index. */ | ||
| 38 | static inline size_t | ||
| 39 | ecache_nbytes_get(ecache_t *ecache, pszind_t ind) { | ||
| 40 | return eset_nbytes_get(&ecache->eset, ind) + | ||
| 41 | eset_nbytes_get(&ecache->guarded_eset, ind); | ||
| 42 | } | ||
| 43 | |||
| 44 | static inline unsigned | ||
| 45 | ecache_ind_get(ecache_t *ecache) { | ||
| 46 | return ecache->ind; | ||
| 47 | } | ||
| 48 | |||
| 49 | bool ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, | ||
| 50 | unsigned ind, bool delay_coalesce); | ||
| 51 | void ecache_prefork(tsdn_t *tsdn, ecache_t *ecache); | ||
| 52 | void ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache); | ||
| 53 | void ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache); | ||
| 54 | |||
| 55 | #endif /* JEMALLOC_INTERNAL_ECACHE_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/edata.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/edata.h deleted file mode 100644 index af039ea..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/edata.h +++ /dev/null | |||
| @@ -1,698 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_EDATA_H | ||
| 2 | #define JEMALLOC_INTERNAL_EDATA_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/atomic.h" | ||
| 5 | #include "jemalloc/internal/bin_info.h" | ||
| 6 | #include "jemalloc/internal/bit_util.h" | ||
| 7 | #include "jemalloc/internal/hpdata.h" | ||
| 8 | #include "jemalloc/internal/nstime.h" | ||
| 9 | #include "jemalloc/internal/ph.h" | ||
| 10 | #include "jemalloc/internal/ql.h" | ||
| 11 | #include "jemalloc/internal/sc.h" | ||
| 12 | #include "jemalloc/internal/slab_data.h" | ||
| 13 | #include "jemalloc/internal/sz.h" | ||
| 14 | #include "jemalloc/internal/typed_list.h" | ||
| 15 | |||
| 16 | /* | ||
| 17 | * sizeof(edata_t) is 128 bytes on 64-bit architectures. Ensure the alignment | ||
| 18 | * to free up the low bits in the rtree leaf. | ||
| 19 | */ | ||
| 20 | #define EDATA_ALIGNMENT 128 | ||
| 21 | |||
| 22 | enum extent_state_e { | ||
| 23 | extent_state_active = 0, | ||
| 24 | extent_state_dirty = 1, | ||
| 25 | extent_state_muzzy = 2, | ||
| 26 | extent_state_retained = 3, | ||
| 27 | extent_state_transition = 4, /* States below are intermediate. */ | ||
| 28 | extent_state_merging = 5, | ||
| 29 | extent_state_max = 5 /* Sanity checking only. */ | ||
| 30 | }; | ||
| 31 | typedef enum extent_state_e extent_state_t; | ||
| 32 | |||
| 33 | enum extent_head_state_e { | ||
| 34 | EXTENT_NOT_HEAD, | ||
| 35 | EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */ | ||
| 36 | }; | ||
| 37 | typedef enum extent_head_state_e extent_head_state_t; | ||
| 38 | |||
| 39 | /* | ||
| 40 | * Which implementation of the page allocator interface, (PAI, defined in | ||
| 41 | * pai.h) owns the given extent? | ||
| 42 | */ | ||
| 43 | enum extent_pai_e { | ||
| 44 | EXTENT_PAI_PAC = 0, | ||
| 45 | EXTENT_PAI_HPA = 1 | ||
| 46 | }; | ||
| 47 | typedef enum extent_pai_e extent_pai_t; | ||
| 48 | |||
| 49 | struct e_prof_info_s { | ||
| 50 | /* Time when this was allocated. */ | ||
| 51 | nstime_t e_prof_alloc_time; | ||
| 52 | /* Allocation request size. */ | ||
| 53 | size_t e_prof_alloc_size; | ||
| 54 | /* Points to a prof_tctx_t. */ | ||
| 55 | atomic_p_t e_prof_tctx; | ||
| 56 | /* | ||
| 57 | * Points to a prof_recent_t for the allocation; NULL | ||
| 58 | * means the recent allocation record no longer exists. | ||
| 59 | * Protected by prof_recent_alloc_mtx. | ||
| 60 | */ | ||
| 61 | atomic_p_t e_prof_recent_alloc; | ||
| 62 | }; | ||
| 63 | typedef struct e_prof_info_s e_prof_info_t; | ||
| 64 | |||
| 65 | /* | ||
| 66 | * The information about a particular edata that lives in an emap. Space is | ||
| 67 | * more precious there (the information, plus the edata pointer, has to live in | ||
| 68 | * a 64-bit word if we want to enable a packed representation). | ||
| 69 | * | ||
| 70 | * There are two things that are special about the information here: | ||
| 71 | * - It's quicker to access. You have one fewer pointer hop, since finding the | ||
| 72 | * edata_t associated with an item always requires accessing the rtree leaf in | ||
| 73 | * which this data is stored. | ||
| 74 | * - It can be read unsynchronized, and without worrying about lifetime issues. | ||
| 75 | */ | ||
| 76 | typedef struct edata_map_info_s edata_map_info_t; | ||
| 77 | struct edata_map_info_s { | ||
| 78 | bool slab; | ||
| 79 | szind_t szind; | ||
| 80 | }; | ||
| 81 | |||
| 82 | typedef struct edata_cmp_summary_s edata_cmp_summary_t; | ||
| 83 | struct edata_cmp_summary_s { | ||
| 84 | uint64_t sn; | ||
| 85 | uintptr_t addr; | ||
| 86 | }; | ||
| 87 | |||
| 88 | /* Extent (span of pages). Use accessor functions for e_* fields. */ | ||
| 89 | typedef struct edata_s edata_t; | ||
| 90 | ph_structs(edata_avail, edata_t); | ||
| 91 | ph_structs(edata_heap, edata_t); | ||
| 92 | struct edata_s { | ||
| 93 | /* | ||
| 94 | * Bitfield containing several fields: | ||
| 95 | * | ||
| 96 | * a: arena_ind | ||
| 97 | * b: slab | ||
| 98 | * c: committed | ||
| 99 | * p: pai | ||
| 100 | * z: zeroed | ||
| 101 | * g: guarded | ||
| 102 | * t: state | ||
| 103 | * i: szind | ||
| 104 | * f: nfree | ||
| 105 | * s: bin_shard | ||
| 106 | * | ||
| 107 | * 00000000 ... 0000ssss ssffffff ffffiiii iiiitttg zpcbaaaa aaaaaaaa | ||
| 108 | * | ||
| 109 | * arena_ind: Arena from which this extent came, or all 1 bits if | ||
| 110 | * unassociated. | ||
| 111 | * | ||
| 112 | * slab: The slab flag indicates whether the extent is used for a slab | ||
| 113 | * of small regions. This helps differentiate small size classes, | ||
| 114 | * and it indicates whether interior pointers can be looked up via | ||
| 115 | * iealloc(). | ||
| 116 | * | ||
| 117 | * committed: The committed flag indicates whether physical memory is | ||
| 118 | * committed to the extent, whether explicitly or implicitly | ||
| 119 | * as on a system that overcommits and satisfies physical | ||
| 120 | * memory needs on demand via soft page faults. | ||
| 121 | * | ||
| 122 | * pai: The pai flag is an extent_pai_t. | ||
| 123 | * | ||
| 124 | * zeroed: The zeroed flag is used by extent recycling code to track | ||
| 125 | * whether memory is zero-filled. | ||
| 126 | * | ||
| 127 | * guarded: The guarded flag is used by the sanitizer to track whether | ||
| 128 | * the extent has page guards around it. | ||
| 129 | * | ||
| 130 | * state: The state flag is an extent_state_t. | ||
| 131 | * | ||
| 132 | * szind: The szind flag indicates usable size class index for | ||
| 133 | * allocations residing in this extent, regardless of whether the | ||
| 134 | * extent is a slab. Extent size and usable size often differ | ||
| 135 | * even for non-slabs, either due to sz_large_pad or promotion of | ||
| 136 | * sampled small regions. | ||
| 137 | * | ||
| 138 | * nfree: Number of free regions in slab. | ||
| 139 | * | ||
| 140 | * bin_shard: the shard of the bin from which this extent came. | ||
| 141 | */ | ||
| 142 | uint64_t e_bits; | ||
| 143 | #define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT)) | ||
| 144 | |||
| 145 | #define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS | ||
| 146 | #define EDATA_BITS_ARENA_SHIFT 0 | ||
| 147 | #define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT) | ||
| 148 | |||
| 149 | #define EDATA_BITS_SLAB_WIDTH 1 | ||
| 150 | #define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT) | ||
| 151 | #define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT) | ||
| 152 | |||
| 153 | #define EDATA_BITS_COMMITTED_WIDTH 1 | ||
| 154 | #define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT) | ||
| 155 | #define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT) | ||
| 156 | |||
| 157 | #define EDATA_BITS_PAI_WIDTH 1 | ||
| 158 | #define EDATA_BITS_PAI_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT) | ||
| 159 | #define EDATA_BITS_PAI_MASK MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT) | ||
| 160 | |||
| 161 | #define EDATA_BITS_ZEROED_WIDTH 1 | ||
| 162 | #define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT) | ||
| 163 | #define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT) | ||
| 164 | |||
| 165 | #define EDATA_BITS_GUARDED_WIDTH 1 | ||
| 166 | #define EDATA_BITS_GUARDED_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT) | ||
| 167 | #define EDATA_BITS_GUARDED_MASK MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT) | ||
| 168 | |||
| 169 | #define EDATA_BITS_STATE_WIDTH 3 | ||
| 170 | #define EDATA_BITS_STATE_SHIFT (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT) | ||
| 171 | #define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT) | ||
| 172 | |||
| 173 | #define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES) | ||
| 174 | #define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT) | ||
| 175 | #define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT) | ||
| 176 | |||
| 177 | #define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1) | ||
| 178 | #define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT) | ||
| 179 | #define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT) | ||
| 180 | |||
| 181 | #define EDATA_BITS_BINSHARD_WIDTH 6 | ||
| 182 | #define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT) | ||
| 183 | #define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT) | ||
| 184 | |||
| 185 | #define EDATA_BITS_IS_HEAD_WIDTH 1 | ||
| 186 | #define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT) | ||
| 187 | #define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT) | ||
| 188 | |||
| 189 | /* Pointer to the extent that this structure is responsible for. */ | ||
| 190 | void *e_addr; | ||
| 191 | |||
| 192 | union { | ||
| 193 | /* | ||
| 194 | * Extent size and serial number associated with the extent | ||
| 195 | * structure (different than the serial number for the extent at | ||
| 196 | * e_addr). | ||
| 197 | * | ||
| 198 | * ssssssss [...] ssssssss ssssnnnn nnnnnnnn | ||
| 199 | */ | ||
| 200 | size_t e_size_esn; | ||
| 201 | #define EDATA_SIZE_MASK ((size_t)~(PAGE-1)) | ||
| 202 | #define EDATA_ESN_MASK ((size_t)PAGE-1) | ||
| 203 | /* Base extent size, which may not be a multiple of PAGE. */ | ||
| 204 | size_t e_bsize; | ||
| 205 | }; | ||
| 206 | |||
| 207 | /* | ||
| 208 | * If this edata is a user allocation from an HPA, it comes out of some | ||
| 209 | * pageslab (we don't yet support hugepage allocations that don't fit | ||
| 210 | * into pageslabs). This tracks it. | ||
| 211 | */ | ||
| 212 | hpdata_t *e_ps; | ||
| 213 | |||
| 214 | /* | ||
| 215 | * Serial number. These are not necessarily unique; splitting an extent | ||
| 216 | * results in two extents with the same serial number. | ||
| 217 | */ | ||
| 218 | uint64_t e_sn; | ||
| 219 | |||
| 220 | union { | ||
| 221 | /* | ||
| 222 | * List linkage used when the edata_t is active; either in | ||
| 223 | * arena's large allocations or bin_t's slabs_full. | ||
| 224 | */ | ||
| 225 | ql_elm(edata_t) ql_link_active; | ||
| 226 | /* | ||
| 227 | * Pairing heap linkage. Used whenever the extent is inactive | ||
| 228 | * (in the page allocators), or when it is active and in | ||
| 229 | * slabs_nonfull, or when the edata_t is unassociated with an | ||
| 230 | * extent and sitting in an edata_cache. | ||
| 231 | */ | ||
| 232 | union { | ||
| 233 | edata_heap_link_t heap_link; | ||
| 234 | edata_avail_link_t avail_link; | ||
| 235 | }; | ||
| 236 | }; | ||
| 237 | |||
| 238 | union { | ||
| 239 | /* | ||
| 240 | * List linkage used when the extent is inactive: | ||
| 241 | * - Stashed dirty extents | ||
| 242 | * - Ecache LRU functionality. | ||
| 243 | */ | ||
| 244 | ql_elm(edata_t) ql_link_inactive; | ||
| 245 | /* Small region slab metadata. */ | ||
| 246 | slab_data_t e_slab_data; | ||
| 247 | |||
| 248 | /* Profiling data, used for large objects. */ | ||
| 249 | e_prof_info_t e_prof_info; | ||
| 250 | }; | ||
| 251 | }; | ||
| 252 | |||
| 253 | TYPED_LIST(edata_list_active, edata_t, ql_link_active) | ||
| 254 | TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive) | ||
| 255 | |||
| 256 | static inline unsigned | ||
| 257 | edata_arena_ind_get(const edata_t *edata) { | ||
| 258 | unsigned arena_ind = (unsigned)((edata->e_bits & | ||
| 259 | EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT); | ||
| 260 | assert(arena_ind < MALLOCX_ARENA_LIMIT); | ||
| 261 | |||
| 262 | return arena_ind; | ||
| 263 | } | ||
| 264 | |||
| 265 | static inline szind_t | ||
| 266 | edata_szind_get_maybe_invalid(const edata_t *edata) { | ||
| 267 | szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >> | ||
| 268 | EDATA_BITS_SZIND_SHIFT); | ||
| 269 | assert(szind <= SC_NSIZES); | ||
| 270 | return szind; | ||
| 271 | } | ||
| 272 | |||
| 273 | static inline szind_t | ||
| 274 | edata_szind_get(const edata_t *edata) { | ||
| 275 | szind_t szind = edata_szind_get_maybe_invalid(edata); | ||
| 276 | assert(szind < SC_NSIZES); /* Never call when "invalid". */ | ||
| 277 | return szind; | ||
| 278 | } | ||
| 279 | |||
| 280 | static inline size_t | ||
| 281 | edata_usize_get(const edata_t *edata) { | ||
| 282 | return sz_index2size(edata_szind_get(edata)); | ||
| 283 | } | ||
| 284 | |||
| 285 | static inline unsigned | ||
| 286 | edata_binshard_get(const edata_t *edata) { | ||
| 287 | unsigned binshard = (unsigned)((edata->e_bits & | ||
| 288 | EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT); | ||
| 289 | assert(binshard < bin_infos[edata_szind_get(edata)].n_shards); | ||
| 290 | return binshard; | ||
| 291 | } | ||
| 292 | |||
| 293 | static inline uint64_t | ||
| 294 | edata_sn_get(const edata_t *edata) { | ||
| 295 | return edata->e_sn; | ||
| 296 | } | ||
| 297 | |||
| 298 | static inline extent_state_t | ||
| 299 | edata_state_get(const edata_t *edata) { | ||
| 300 | return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >> | ||
| 301 | EDATA_BITS_STATE_SHIFT); | ||
| 302 | } | ||
| 303 | |||
| 304 | static inline bool | ||
| 305 | edata_guarded_get(const edata_t *edata) { | ||
| 306 | return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >> | ||
| 307 | EDATA_BITS_GUARDED_SHIFT); | ||
| 308 | } | ||
| 309 | |||
| 310 | static inline bool | ||
| 311 | edata_zeroed_get(const edata_t *edata) { | ||
| 312 | return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >> | ||
| 313 | EDATA_BITS_ZEROED_SHIFT); | ||
| 314 | } | ||
| 315 | |||
| 316 | static inline bool | ||
| 317 | edata_committed_get(const edata_t *edata) { | ||
| 318 | return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >> | ||
| 319 | EDATA_BITS_COMMITTED_SHIFT); | ||
| 320 | } | ||
| 321 | |||
| 322 | static inline extent_pai_t | ||
| 323 | edata_pai_get(const edata_t *edata) { | ||
| 324 | return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >> | ||
| 325 | EDATA_BITS_PAI_SHIFT); | ||
| 326 | } | ||
| 327 | |||
| 328 | static inline bool | ||
| 329 | edata_slab_get(const edata_t *edata) { | ||
| 330 | return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >> | ||
| 331 | EDATA_BITS_SLAB_SHIFT); | ||
| 332 | } | ||
| 333 | |||
| 334 | static inline unsigned | ||
| 335 | edata_nfree_get(const edata_t *edata) { | ||
| 336 | assert(edata_slab_get(edata)); | ||
| 337 | return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >> | ||
| 338 | EDATA_BITS_NFREE_SHIFT); | ||
| 339 | } | ||
| 340 | |||
| 341 | static inline void * | ||
| 342 | edata_base_get(const edata_t *edata) { | ||
| 343 | assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) || | ||
| 344 | !edata_slab_get(edata)); | ||
| 345 | return PAGE_ADDR2BASE(edata->e_addr); | ||
| 346 | } | ||
| 347 | |||
| 348 | static inline void * | ||
| 349 | edata_addr_get(const edata_t *edata) { | ||
| 350 | assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) || | ||
| 351 | !edata_slab_get(edata)); | ||
| 352 | return edata->e_addr; | ||
| 353 | } | ||
| 354 | |||
| 355 | static inline size_t | ||
| 356 | edata_size_get(const edata_t *edata) { | ||
| 357 | return (edata->e_size_esn & EDATA_SIZE_MASK); | ||
| 358 | } | ||
| 359 | |||
| 360 | static inline size_t | ||
| 361 | edata_esn_get(const edata_t *edata) { | ||
| 362 | return (edata->e_size_esn & EDATA_ESN_MASK); | ||
| 363 | } | ||
| 364 | |||
| 365 | static inline size_t | ||
| 366 | edata_bsize_get(const edata_t *edata) { | ||
| 367 | return edata->e_bsize; | ||
| 368 | } | ||
| 369 | |||
| 370 | static inline hpdata_t * | ||
| 371 | edata_ps_get(const edata_t *edata) { | ||
| 372 | assert(edata_pai_get(edata) == EXTENT_PAI_HPA); | ||
| 373 | return edata->e_ps; | ||
| 374 | } | ||
| 375 | |||
| 376 | static inline void * | ||
| 377 | edata_before_get(const edata_t *edata) { | ||
| 378 | return (void *)((uintptr_t)edata_base_get(edata) - PAGE); | ||
| 379 | } | ||
| 380 | |||
| 381 | static inline void * | ||
| 382 | edata_last_get(const edata_t *edata) { | ||
| 383 | return (void *)((uintptr_t)edata_base_get(edata) + | ||
| 384 | edata_size_get(edata) - PAGE); | ||
| 385 | } | ||
| 386 | |||
| 387 | static inline void * | ||
| 388 | edata_past_get(const edata_t *edata) { | ||
| 389 | return (void *)((uintptr_t)edata_base_get(edata) + | ||
| 390 | edata_size_get(edata)); | ||
| 391 | } | ||
| 392 | |||
| 393 | static inline slab_data_t * | ||
| 394 | edata_slab_data_get(edata_t *edata) { | ||
| 395 | assert(edata_slab_get(edata)); | ||
| 396 | return &edata->e_slab_data; | ||
| 397 | } | ||
| 398 | |||
| 399 | static inline const slab_data_t * | ||
| 400 | edata_slab_data_get_const(const edata_t *edata) { | ||
| 401 | assert(edata_slab_get(edata)); | ||
| 402 | return &edata->e_slab_data; | ||
| 403 | } | ||
| 404 | |||
| 405 | static inline prof_tctx_t * | ||
| 406 | edata_prof_tctx_get(const edata_t *edata) { | ||
| 407 | return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx, | ||
| 408 | ATOMIC_ACQUIRE); | ||
| 409 | } | ||
| 410 | |||
| 411 | static inline const nstime_t * | ||
| 412 | edata_prof_alloc_time_get(const edata_t *edata) { | ||
| 413 | return &edata->e_prof_info.e_prof_alloc_time; | ||
| 414 | } | ||
| 415 | |||
| 416 | static inline size_t | ||
| 417 | edata_prof_alloc_size_get(const edata_t *edata) { | ||
| 418 | return edata->e_prof_info.e_prof_alloc_size; | ||
| 419 | } | ||
| 420 | |||
| 421 | static inline prof_recent_t * | ||
| 422 | edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) { | ||
| 423 | return (prof_recent_t *)atomic_load_p( | ||
| 424 | &edata->e_prof_info.e_prof_recent_alloc, ATOMIC_RELAXED); | ||
| 425 | } | ||
| 426 | |||
| 427 | static inline void | ||
| 428 | edata_arena_ind_set(edata_t *edata, unsigned arena_ind) { | ||
| 429 | edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) | | ||
| 430 | ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT); | ||
| 431 | } | ||
| 432 | |||
| 433 | static inline void | ||
| 434 | edata_binshard_set(edata_t *edata, unsigned binshard) { | ||
| 435 | /* The assertion assumes szind is set already. */ | ||
| 436 | assert(binshard < bin_infos[edata_szind_get(edata)].n_shards); | ||
| 437 | edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) | | ||
| 438 | ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT); | ||
| 439 | } | ||
| 440 | |||
| 441 | static inline void | ||
| 442 | edata_addr_set(edata_t *edata, void *addr) { | ||
| 443 | edata->e_addr = addr; | ||
| 444 | } | ||
| 445 | |||
| 446 | static inline void | ||
| 447 | edata_size_set(edata_t *edata, size_t size) { | ||
| 448 | assert((size & ~EDATA_SIZE_MASK) == 0); | ||
| 449 | edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK); | ||
| 450 | } | ||
| 451 | |||
| 452 | static inline void | ||
| 453 | edata_esn_set(edata_t *edata, size_t esn) { | ||
| 454 | edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn & | ||
| 455 | EDATA_ESN_MASK); | ||
| 456 | } | ||
| 457 | |||
| 458 | static inline void | ||
| 459 | edata_bsize_set(edata_t *edata, size_t bsize) { | ||
| 460 | edata->e_bsize = bsize; | ||
| 461 | } | ||
| 462 | |||
| 463 | static inline void | ||
| 464 | edata_ps_set(edata_t *edata, hpdata_t *ps) { | ||
| 465 | assert(edata_pai_get(edata) == EXTENT_PAI_HPA); | ||
| 466 | edata->e_ps = ps; | ||
| 467 | } | ||
| 468 | |||
| 469 | static inline void | ||
| 470 | edata_szind_set(edata_t *edata, szind_t szind) { | ||
| 471 | assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */ | ||
| 472 | edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) | | ||
| 473 | ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT); | ||
| 474 | } | ||
| 475 | |||
| 476 | static inline void | ||
| 477 | edata_nfree_set(edata_t *edata, unsigned nfree) { | ||
| 478 | assert(edata_slab_get(edata)); | ||
| 479 | edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) | | ||
| 480 | ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT); | ||
| 481 | } | ||
| 482 | |||
| 483 | static inline void | ||
| 484 | edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) { | ||
| 485 | /* The assertion assumes szind is set already. */ | ||
| 486 | assert(binshard < bin_infos[edata_szind_get(edata)].n_shards); | ||
| 487 | edata->e_bits = (edata->e_bits & | ||
| 488 | (~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) | | ||
| 489 | ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) | | ||
| 490 | ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT); | ||
| 491 | } | ||
| 492 | |||
| 493 | static inline void | ||
| 494 | edata_nfree_inc(edata_t *edata) { | ||
| 495 | assert(edata_slab_get(edata)); | ||
| 496 | edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT); | ||
| 497 | } | ||
| 498 | |||
| 499 | static inline void | ||
| 500 | edata_nfree_dec(edata_t *edata) { | ||
| 501 | assert(edata_slab_get(edata)); | ||
| 502 | edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT); | ||
| 503 | } | ||
| 504 | |||
| 505 | static inline void | ||
| 506 | edata_nfree_sub(edata_t *edata, uint64_t n) { | ||
| 507 | assert(edata_slab_get(edata)); | ||
| 508 | edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT); | ||
| 509 | } | ||
| 510 | |||
| 511 | static inline void | ||
| 512 | edata_sn_set(edata_t *edata, uint64_t sn) { | ||
| 513 | edata->e_sn = sn; | ||
| 514 | } | ||
| 515 | |||
| 516 | static inline void | ||
| 517 | edata_state_set(edata_t *edata, extent_state_t state) { | ||
| 518 | edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) | | ||
| 519 | ((uint64_t)state << EDATA_BITS_STATE_SHIFT); | ||
| 520 | } | ||
| 521 | |||
| 522 | static inline void | ||
| 523 | edata_guarded_set(edata_t *edata, bool guarded) { | ||
| 524 | edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) | | ||
| 525 | ((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT); | ||
| 526 | } | ||
| 527 | |||
| 528 | static inline void | ||
| 529 | edata_zeroed_set(edata_t *edata, bool zeroed) { | ||
| 530 | edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) | | ||
| 531 | ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT); | ||
| 532 | } | ||
| 533 | |||
| 534 | static inline void | ||
| 535 | edata_committed_set(edata_t *edata, bool committed) { | ||
| 536 | edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) | | ||
| 537 | ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT); | ||
| 538 | } | ||
| 539 | |||
| 540 | static inline void | ||
| 541 | edata_pai_set(edata_t *edata, extent_pai_t pai) { | ||
| 542 | edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) | | ||
| 543 | ((uint64_t)pai << EDATA_BITS_PAI_SHIFT); | ||
| 544 | } | ||
| 545 | |||
| 546 | static inline void | ||
| 547 | edata_slab_set(edata_t *edata, bool slab) { | ||
| 548 | edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) | | ||
| 549 | ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT); | ||
| 550 | } | ||
| 551 | |||
| 552 | static inline void | ||
| 553 | edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) { | ||
| 554 | atomic_store_p(&edata->e_prof_info.e_prof_tctx, tctx, ATOMIC_RELEASE); | ||
| 555 | } | ||
| 556 | |||
| 557 | static inline void | ||
| 558 | edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) { | ||
| 559 | nstime_copy(&edata->e_prof_info.e_prof_alloc_time, t); | ||
| 560 | } | ||
| 561 | |||
| 562 | static inline void | ||
| 563 | edata_prof_alloc_size_set(edata_t *edata, size_t size) { | ||
| 564 | edata->e_prof_info.e_prof_alloc_size = size; | ||
| 565 | } | ||
| 566 | |||
| 567 | static inline void | ||
| 568 | edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata, | ||
| 569 | prof_recent_t *recent_alloc) { | ||
| 570 | atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc, | ||
| 571 | ATOMIC_RELAXED); | ||
| 572 | } | ||
| 573 | |||
| 574 | static inline bool | ||
| 575 | edata_is_head_get(edata_t *edata) { | ||
| 576 | return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >> | ||
| 577 | EDATA_BITS_IS_HEAD_SHIFT); | ||
| 578 | } | ||
| 579 | |||
| 580 | static inline void | ||
| 581 | edata_is_head_set(edata_t *edata, bool is_head) { | ||
| 582 | edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) | | ||
| 583 | ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT); | ||
| 584 | } | ||
| 585 | |||
| 586 | static inline bool | ||
| 587 | edata_state_in_transition(extent_state_t state) { | ||
| 588 | return state >= extent_state_transition; | ||
| 589 | } | ||
| 590 | |||
| 591 | /* | ||
| 592 | * Because this function is implemented as a sequence of bitfield modifications, | ||
| 593 | * even though each individual bit is properly initialized, we technically read | ||
| 594 | * uninitialized data within it. This is mostly fine, since most callers get | ||
| 595 | * their edatas from zeroing sources, but callers who make stack edata_ts need | ||
| 596 | * to manually zero them. | ||
| 597 | */ | ||
| 598 | static inline void | ||
| 599 | edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size, | ||
| 600 | bool slab, szind_t szind, uint64_t sn, extent_state_t state, bool zeroed, | ||
| 601 | bool committed, extent_pai_t pai, extent_head_state_t is_head) { | ||
| 602 | assert(addr == PAGE_ADDR2BASE(addr) || !slab); | ||
| 603 | |||
| 604 | edata_arena_ind_set(edata, arena_ind); | ||
| 605 | edata_addr_set(edata, addr); | ||
| 606 | edata_size_set(edata, size); | ||
| 607 | edata_slab_set(edata, slab); | ||
| 608 | edata_szind_set(edata, szind); | ||
| 609 | edata_sn_set(edata, sn); | ||
| 610 | edata_state_set(edata, state); | ||
| 611 | edata_guarded_set(edata, false); | ||
| 612 | edata_zeroed_set(edata, zeroed); | ||
| 613 | edata_committed_set(edata, committed); | ||
| 614 | edata_pai_set(edata, pai); | ||
| 615 | edata_is_head_set(edata, is_head == EXTENT_IS_HEAD); | ||
| 616 | if (config_prof) { | ||
| 617 | edata_prof_tctx_set(edata, NULL); | ||
| 618 | } | ||
| 619 | } | ||
| 620 | |||
| 621 | static inline void | ||
| 622 | edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) { | ||
| 623 | edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1); | ||
| 624 | edata_addr_set(edata, addr); | ||
| 625 | edata_bsize_set(edata, bsize); | ||
| 626 | edata_slab_set(edata, false); | ||
| 627 | edata_szind_set(edata, SC_NSIZES); | ||
| 628 | edata_sn_set(edata, sn); | ||
| 629 | edata_state_set(edata, extent_state_active); | ||
| 630 | edata_guarded_set(edata, false); | ||
| 631 | edata_zeroed_set(edata, true); | ||
| 632 | edata_committed_set(edata, true); | ||
| 633 | /* | ||
| 634 | * This isn't strictly true, but base allocated extents never get | ||
| 635 | * deallocated and can't be looked up in the emap, so there's no sense in | ||
| 636 | * wasting a state bit to encode this fact. | ||
| 637 | */ | ||
| 638 | edata_pai_set(edata, EXTENT_PAI_PAC); | ||
| 639 | } | ||
| 640 | |||
| 641 | static inline int | ||
| 642 | edata_esn_comp(const edata_t *a, const edata_t *b) { | ||
| 643 | size_t a_esn = edata_esn_get(a); | ||
| 644 | size_t b_esn = edata_esn_get(b); | ||
| 645 | |||
| 646 | return (a_esn > b_esn) - (a_esn < b_esn); | ||
| 647 | } | ||
| 648 | |||
| 649 | static inline int | ||
| 650 | edata_ead_comp(const edata_t *a, const edata_t *b) { | ||
| 651 | uintptr_t a_eaddr = (uintptr_t)a; | ||
| 652 | uintptr_t b_eaddr = (uintptr_t)b; | ||
| 653 | |||
| 654 | return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr); | ||
| 655 | } | ||
| 656 | |||
| 657 | static inline edata_cmp_summary_t | ||
| 658 | edata_cmp_summary_get(const edata_t *edata) { | ||
| 659 | return (edata_cmp_summary_t){edata_sn_get(edata), | ||
| 660 | (uintptr_t)edata_addr_get(edata)}; | ||
| 661 | } | ||
| 662 | |||
| 663 | static inline int | ||
| 664 | edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) { | ||
| 665 | int ret; | ||
| 666 | ret = (a.sn > b.sn) - (a.sn < b.sn); | ||
| 667 | if (ret != 0) { | ||
| 668 | return ret; | ||
| 669 | } | ||
| 670 | ret = (a.addr > b.addr) - (a.addr < b.addr); | ||
| 671 | return ret; | ||
| 672 | } | ||
| 673 | |||
| 674 | static inline int | ||
| 675 | edata_snad_comp(const edata_t *a, const edata_t *b) { | ||
| 676 | edata_cmp_summary_t a_cmp = edata_cmp_summary_get(a); | ||
| 677 | edata_cmp_summary_t b_cmp = edata_cmp_summary_get(b); | ||
| 678 | |||
| 679 | return edata_cmp_summary_comp(a_cmp, b_cmp); | ||
| 680 | } | ||
| 681 | |||
| 682 | static inline int | ||
| 683 | edata_esnead_comp(const edata_t *a, const edata_t *b) { | ||
| 684 | int ret; | ||
| 685 | |||
| 686 | ret = edata_esn_comp(a, b); | ||
| 687 | if (ret != 0) { | ||
| 688 | return ret; | ||
| 689 | } | ||
| 690 | |||
| 691 | ret = edata_ead_comp(a, b); | ||
| 692 | return ret; | ||
| 693 | } | ||
| 694 | |||
| 695 | ph_proto(, edata_avail, edata_t) | ||
| 696 | ph_proto(, edata_heap, edata_t) | ||
| 697 | |||
| 698 | #endif /* JEMALLOC_INTERNAL_EDATA_H */ | ||
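
The e_bits field above packs the extent's logical fields (arena index, slab/committed/zeroed/guarded flags, state, size class, free count, bin shard, head flag) into one uint64_t via the WIDTH/SHIFT/MASK macros, and every accessor is just "mask, shift" for a get or "clear, or-in" for a set. A trimmed-down standalone sketch of that packing scheme with two fields follows; the widths here are illustrative, not the real EDATA_BITS_* values.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Builds a width-bit mask positioned at shift, as in the header. */
    #define MASK(width, shift) (((((uint64_t)0x1U) << (width)) - 1) << (shift))

    /* Field layout (illustrative widths): arena_ind in bits 0..11, state in 12..14. */
    #define BITS_ARENA_WIDTH 12
    #define BITS_ARENA_SHIFT 0
    #define BITS_ARENA_MASK  MASK(BITS_ARENA_WIDTH, BITS_ARENA_SHIFT)

    #define BITS_STATE_WIDTH 3
    #define BITS_STATE_SHIFT (BITS_ARENA_WIDTH + BITS_ARENA_SHIFT)
    #define BITS_STATE_MASK  MASK(BITS_STATE_WIDTH, BITS_STATE_SHIFT)

    typedef struct {
        uint64_t e_bits;
    } toy_edata_t;

    static unsigned toy_arena_ind_get(const toy_edata_t *e) {
        return (unsigned)((e->e_bits & BITS_ARENA_MASK) >> BITS_ARENA_SHIFT);
    }

    static void toy_arena_ind_set(toy_edata_t *e, unsigned arena_ind) {
        e->e_bits = (e->e_bits & ~BITS_ARENA_MASK) |
            ((uint64_t)arena_ind << BITS_ARENA_SHIFT);
    }

    static unsigned toy_state_get(const toy_edata_t *e) {
        return (unsigned)((e->e_bits & BITS_STATE_MASK) >> BITS_STATE_SHIFT);
    }

    static void toy_state_set(toy_edata_t *e, unsigned state) {
        e->e_bits = (e->e_bits & ~BITS_STATE_MASK) |
            ((uint64_t)state << BITS_STATE_SHIFT);
    }

    int main(void) {
        toy_edata_t e = {0};
        toy_arena_ind_set(&e, 37);
        toy_state_set(&e, 2);          /* e.g. "muzzy" in the real enum */
        toy_arena_ind_set(&e, 41);     /* updating one field leaves the other intact */
        assert(toy_arena_ind_get(&e) == 41);
        assert(toy_state_get(&e) == 2);
        printf("e_bits = 0x%llx\n", (unsigned long long)e.e_bits);
        return 0;
    }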
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/edata_cache.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/edata_cache.h deleted file mode 100644 index 8b6c0ef..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/edata_cache.h +++ /dev/null | |||
| @@ -1,49 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_EDATA_CACHE_H | ||
| 2 | #define JEMALLOC_INTERNAL_EDATA_CACHE_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/base.h" | ||
| 5 | |||
| 6 | /* For tests only. */ | ||
| 7 | #define EDATA_CACHE_FAST_FILL 4 | ||
| 8 | |||
| 9 | /* | ||
| 10 | * A cache of edata_t structures allocated via base_alloc_edata (as opposed to | ||
| 11 | * the underlying extents they describe). The contents of returned edata_t | ||
| 12 | * objects are garbage and cannot be relied upon. | ||
| 13 | */ | ||
| 14 | |||
| 15 | typedef struct edata_cache_s edata_cache_t; | ||
| 16 | struct edata_cache_s { | ||
| 17 | edata_avail_t avail; | ||
| 18 | atomic_zu_t count; | ||
| 19 | malloc_mutex_t mtx; | ||
| 20 | base_t *base; | ||
| 21 | }; | ||
| 22 | |||
| 23 | bool edata_cache_init(edata_cache_t *edata_cache, base_t *base); | ||
| 24 | edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache); | ||
| 25 | void edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata); | ||
| 26 | |||
| 27 | void edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache); | ||
| 28 | void edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache); | ||
| 29 | void edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache); | ||
| 30 | |||
| 31 | /* | ||
| 32 | * An edata_cache_fast is like an edata_cache, but it relies on external | ||
| 33 | * synchronization and avoids first-fit strategies. | ||
| 34 | */ | ||
| 35 | |||
| 36 | typedef struct edata_cache_fast_s edata_cache_fast_t; | ||
| 37 | struct edata_cache_fast_s { | ||
| 38 | edata_list_inactive_t list; | ||
| 39 | edata_cache_t *fallback; | ||
| 40 | bool disabled; | ||
| 41 | }; | ||
| 42 | |||
| 43 | void edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback); | ||
| 44 | edata_t *edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs); | ||
| 45 | void edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, | ||
| 46 | edata_t *edata); | ||
| 47 | void edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs); | ||
| 48 | |||
| 49 | #endif /* JEMALLOC_INTERNAL_EDATA_CACHE_H */ | ||
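
edata_cache_fast above layers a small, externally synchronized free list in front of the locked edata_cache: get pops from the local list when possible and otherwise falls back to the shared cache, while put pushes back onto the local list unless the cache has been disabled. A minimal standalone sketch of that two-level get/put pattern follows; the node type, the malloc/free fallback, and the flush-on-disable behavior are illustrative stand-ins, not the real edata machinery.

    #include <stdbool.h>
    #include <stdlib.h>
    #include <stdio.h>

    /* Illustrative object; stands in for edata_t. */
    typedef struct node_s {
        struct node_s *next;
        int payload;
    } node_t;

    /* Fallback "slow" cache: here just calloc/free; stands in for edata_cache_t. */
    static node_t *slow_get(void) { return calloc(1, sizeof(node_t)); }
    static void slow_put(node_t *n) { free(n); }

    /* Fast cache: an unsynchronized singly linked free list plus a disabled flag. */
    typedef struct {
        node_t *list;
        bool disabled;
    } fast_cache_t;

    static void fast_init(fast_cache_t *fc) {
        fc->list = NULL;
        fc->disabled = false;
    }

    static node_t *fast_get(fast_cache_t *fc) {
        if (!fc->disabled && fc->list != NULL) {
            node_t *n = fc->list;    /* fast path: pop from the local list */
            fc->list = n->next;
            return n;
        }
        return slow_get();           /* fall back to the shared cache */
    }

    static void fast_put(fast_cache_t *fc, node_t *n) {
        if (fc->disabled) {
            slow_put(n);             /* disabled: bypass the local list entirely */
            return;
        }
        n->next = fc->list;          /* fast path: push onto the local list */
        fc->list = n;
    }

    static void fast_disable(fast_cache_t *fc) {
        while (fc->list != NULL) {   /* flush anything cached, then mark disabled */
            node_t *n = fc->list;
            fc->list = n->next;
            slow_put(n);
        }
        fc->disabled = true;
    }

    int main(void) {
        fast_cache_t fc;
        fast_init(&fc);
        node_t *a = fast_get(&fc);
        fast_put(&fc, a);
        node_t *b = fast_get(&fc);   /* reuses a without touching the fallback */
        printf("reused: %s\n", a == b ? "yes" : "no");
        fast_disable(&fc);
        fast_put(&fc, b);            /* goes straight to the fallback now */
        return 0;
    }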
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ehooks.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ehooks.h deleted file mode 100644 index 8d9513e..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ehooks.h +++ /dev/null | |||
| @@ -1,412 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_EHOOKS_H | ||
| 2 | #define JEMALLOC_INTERNAL_EHOOKS_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/atomic.h" | ||
| 5 | #include "jemalloc/internal/extent_mmap.h" | ||
| 6 | |||
| 7 | /* | ||
| 8 | * This module is the internal interface to the extent hooks (both | ||
| 9 | * user-specified and external). Eventually, this will give us the flexibility | ||
| 10 | * to use multiple different versions of user-visible extent-hook APIs under a | ||
| 11 | * single user interface. | ||
| 12 | * | ||
| 13 | * Current API expansions (not available to anyone but the default hooks yet): | ||
| 14 | * - Head state tracking. Hooks can decide whether or not to merge two | ||
| 15 | * extents based on whether or not one of them is the head (i.e. was | ||
| 16 | * allocated on its own). The later extent loses its "head" status. | ||
| 17 | */ | ||
| 18 | |||
| 19 | extern const extent_hooks_t ehooks_default_extent_hooks; | ||
| 20 | |||
| 21 | typedef struct ehooks_s ehooks_t; | ||
| 22 | struct ehooks_s { | ||
| 23 | /* | ||
| 24 | * The user-visible id that goes with the ehooks (i.e. that of the base | ||
| 25 | * they're a part of, the associated arena's index within the arenas | ||
| 26 | * array). | ||
| 27 | */ | ||
| 28 | unsigned ind; | ||
| 29 | /* Logically an extent_hooks_t *. */ | ||
| 30 | atomic_p_t ptr; | ||
| 31 | }; | ||
| 32 | |||
| 33 | extern const extent_hooks_t ehooks_default_extent_hooks; | ||
| 34 | |||
| 35 | /* | ||
| 36 | * These are not really part of the public API. Each hook has a fast-path for | ||
| 37 | * the default-hooks case that can avoid various small inefficiencies: | ||
| 38 | * - Forgetting tsd and then calling tsd_get within the hook. | ||
| 39 | * - Getting more state than necessary out of the extent_t. | ||
| 40 | * - Doing arena_ind -> arena -> arena_ind lookups. | ||
| 41 | * By making the calls to these functions visible to the compiler, it can move | ||
| 42 | * those extra bits of computation down below the fast-paths where they get ignored. | ||
| 43 | */ | ||
| 44 | void *ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size, | ||
| 45 | size_t alignment, bool *zero, bool *commit, unsigned arena_ind); | ||
| 46 | bool ehooks_default_dalloc_impl(void *addr, size_t size); | ||
| 47 | void ehooks_default_destroy_impl(void *addr, size_t size); | ||
| 48 | bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length); | ||
| 49 | bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length); | ||
| 50 | #ifdef PAGES_CAN_PURGE_LAZY | ||
| 51 | bool ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length); | ||
| 52 | #endif | ||
| 53 | #ifdef PAGES_CAN_PURGE_FORCED | ||
| 54 | bool ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length); | ||
| 55 | #endif | ||
| 56 | bool ehooks_default_split_impl(); | ||
| 57 | /* | ||
| 58 | * Merge is the only default extent hook we declare -- see the comment in | ||
| 59 | * ehooks_merge. | ||
| 60 | */ | ||
| 61 | bool ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, | ||
| 62 | size_t size_a, void *addr_b, size_t size_b, bool committed, | ||
| 63 | unsigned arena_ind); | ||
| 64 | bool ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b); | ||
| 65 | void ehooks_default_zero_impl(void *addr, size_t size); | ||
| 66 | void ehooks_default_guard_impl(void *guard1, void *guard2); | ||
| 67 | void ehooks_default_unguard_impl(void *guard1, void *guard2); | ||
| 68 | |||
| 69 | /* | ||
| 70 | * We don't officially support reentrancy from within the extent hooks. But | ||
| 71 | * various people who sit within throwing distance of the jemalloc team want | ||
| 72 | * that functionality in certain limited cases. The default reentrancy guards | ||
| 73 | * assert that we're not reentrant from a0 (since it's the bootstrap arena, | ||
| 74 | * where reentrant allocations would be redirected), which we would incorrectly | ||
| 75 | * trigger in cases where a0 has extent hooks (those hooks themselves can't be | ||
| 76 | * reentrant, then, but there are reasonable uses for such functionality, like | ||
| 77 | * putting internal metadata on hugepages). Therefore, we use the raw | ||
| 78 | * reentrancy guards. | ||
| 79 | * | ||
| 80 | * Eventually, we need to think more carefully about whether and where we | ||
| 81 | * support allocating from within extent hooks (and what that means for things | ||
| 82 | * like profiling, stats collection, etc.), and document what the guarantee is. | ||
| 83 | */ | ||
| 84 | static inline void | ||
| 85 | ehooks_pre_reentrancy(tsdn_t *tsdn) { | ||
| 86 | tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); | ||
| 87 | tsd_pre_reentrancy_raw(tsd); | ||
| 88 | } | ||
| 89 | |||
| 90 | static inline void | ||
| 91 | ehooks_post_reentrancy(tsdn_t *tsdn) { | ||
| 92 | tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); | ||
| 93 | tsd_post_reentrancy_raw(tsd); | ||
| 94 | } | ||
| 95 | |||
| 96 | /* Beginning of the public API. */ | ||
| 97 | void ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind); | ||
| 98 | |||
| 99 | static inline unsigned | ||
| 100 | ehooks_ind_get(const ehooks_t *ehooks) { | ||
| 101 | return ehooks->ind; | ||
| 102 | } | ||
| 103 | |||
| 104 | static inline void | ||
| 105 | ehooks_set_extent_hooks_ptr(ehooks_t *ehooks, extent_hooks_t *extent_hooks) { | ||
| 106 | atomic_store_p(&ehooks->ptr, extent_hooks, ATOMIC_RELEASE); | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline extent_hooks_t * | ||
| 110 | ehooks_get_extent_hooks_ptr(ehooks_t *ehooks) { | ||
| 111 | return (extent_hooks_t *)atomic_load_p(&ehooks->ptr, ATOMIC_ACQUIRE); | ||
| 112 | } | ||
| 113 | |||
| 114 | static inline bool | ||
| 115 | ehooks_are_default(ehooks_t *ehooks) { | ||
| 116 | return ehooks_get_extent_hooks_ptr(ehooks) == | ||
| 117 | &ehooks_default_extent_hooks; | ||
| 118 | } | ||
| 119 | |||
| 120 | /* | ||
| 121 | * In some cases, a caller needs to allocate resources before attempting to call | ||
| 122 | * a hook. If that hook is doomed to fail, this is wasteful. We therefore | ||
| 123 | * include some checks for such cases. | ||
| 124 | */ | ||
| 125 | static inline bool | ||
| 126 | ehooks_dalloc_will_fail(ehooks_t *ehooks) { | ||
| 127 | if (ehooks_are_default(ehooks)) { | ||
| 128 | return opt_retain; | ||
| 129 | } else { | ||
| 130 | return ehooks_get_extent_hooks_ptr(ehooks)->dalloc == NULL; | ||
| 131 | } | ||
| 132 | } | ||
| 133 | |||
| 134 | static inline bool | ||
| 135 | ehooks_split_will_fail(ehooks_t *ehooks) { | ||
| 136 | return ehooks_get_extent_hooks_ptr(ehooks)->split == NULL; | ||
| 137 | } | ||
| 138 | |||
| 139 | static inline bool | ||
| 140 | ehooks_merge_will_fail(ehooks_t *ehooks) { | ||
| 141 | return ehooks_get_extent_hooks_ptr(ehooks)->merge == NULL; | ||
| 142 | } | ||
| 143 | |||
| 144 | static inline bool | ||
| 145 | ehooks_guard_will_fail(ehooks_t *ehooks) { | ||
| 146 | /* | ||
| 147 | * Before the guard hooks are officially introduced, limit the use to | ||
| 148 | * the default hooks only. | ||
| 149 | */ | ||
| 150 | return !ehooks_are_default(ehooks); | ||
| 151 | } | ||
| 152 | |||
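The `*_will_fail` predicates above exist so callers can skip expensive preparation for a hook call that is doomed anyway. A minimal sketch of that pattern, assuming the jemalloc-internal environment this header lives in (the wrapper name is hypothetical):

```c
/* Hypothetical caller: skip preparation when the dalloc hook cannot succeed. */
static bool
try_dalloc_sketch(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size) {
	if (ehooks_dalloc_will_fail(ehooks)) {
		return true;	/* Caller keeps (retains) the extent instead. */
	}
	/* ... allocate any bookkeeping needed for the deallocation ... */
	return ehooks_dalloc(tsdn, ehooks, addr, size, /* committed */ true);
}
```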
| 153 | /* | ||
| 154 | * Some hooks are required to return zeroed memory in certain situations. In | ||
| 155 | * debug mode, we do some heuristic checks that they did what they were supposed | ||
| 156 | * to. | ||
| 157 | * | ||
| 158 | * This isn't really ehooks-specific (i.e. anyone can check for zeroed memory). | ||
| 159 | * But incorrect zero information indicates an ehook bug. | ||
| 160 | */ | ||
| 161 | static inline void | ||
| 162 | ehooks_debug_zero_check(void *addr, size_t size) { | ||
| 163 | assert(((uintptr_t)addr & PAGE_MASK) == 0); | ||
| 164 | assert((size & PAGE_MASK) == 0); | ||
| 165 | assert(size > 0); | ||
| 166 | if (config_debug) { | ||
| 167 | /* Check the whole first page. */ | ||
| 168 | size_t *p = (size_t *)addr; | ||
| 169 | for (size_t i = 0; i < PAGE / sizeof(size_t); i++) { | ||
| 170 | assert(p[i] == 0); | ||
| 171 | } | ||
| 172 | /* | ||
| 173 | * And 4 spots within. There's a tradeoff here; the larger | ||
| 174 | * this number, the more likely it is that we'll catch a bug | ||
| 175 | * where ehooks return a sparsely non-zero range. But | ||
| 176 | * increasing the number of checks also increases the number of | ||
| 177 | * page faults in debug mode. FreeBSD does much of their | ||
| 178 | * day-to-day development work in debug mode, so we don't want | ||
| 179 | * even the debug builds to be too slow. | ||
| 180 | */ | ||
| 181 | const size_t nchecks = 4; | ||
| 182 | assert(PAGE >= sizeof(size_t) * nchecks); | ||
| 183 | for (size_t i = 0; i < nchecks; ++i) { | ||
| 184 | assert(p[i * (size / sizeof(size_t) / nchecks)] == 0); | ||
| 185 | } | ||
| 186 | } | ||
| 187 | } | ||
| 188 | |||
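For example, with an 8-byte `size_t` and a 64 KiB extent, `size / sizeof(size_t) / nchecks` is 2048, so the sampled words are `p[0]`, `p[2048]`, `p[4096]`, and `p[6144]`, on top of the fully checked first page.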
| 189 | |||
| 190 | static inline void * | ||
| 191 | ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size, | ||
| 192 | size_t alignment, bool *zero, bool *commit) { | ||
| 193 | bool orig_zero = *zero; | ||
| 194 | void *ret; | ||
| 195 | extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); | ||
| 196 | if (extent_hooks == &ehooks_default_extent_hooks) { | ||
| 197 | ret = ehooks_default_alloc_impl(tsdn, new_addr, size, | ||
| 198 | alignment, zero, commit, ehooks_ind_get(ehooks)); | ||
| 199 | } else { | ||
| 200 | ehooks_pre_reentrancy(tsdn); | ||
| 201 | ret = extent_hooks->alloc(extent_hooks, new_addr, size, | ||
| 202 | alignment, zero, commit, ehooks_ind_get(ehooks)); | ||
| 203 | ehooks_post_reentrancy(tsdn); | ||
| 204 | } | ||
| 205 | assert(new_addr == NULL || ret == NULL || new_addr == ret); | ||
| 206 | assert(!orig_zero || *zero); | ||
| 207 | if (*zero && ret != NULL) { | ||
| 208 | ehooks_debug_zero_check(ret, size); | ||
| 209 | } | ||
| 210 | return ret; | ||
| 211 | } | ||
| 212 | |||
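For contrast with the default path dispatched above, here is a minimal sketch of a user-supplied alloc hook in the shape jemalloc's public `extent_hooks_t` API expects. The function name is hypothetical, alignment handling and error paths are deliberately simplistic, and this is not the library's own implementation; `MAP_ANONYMOUS` availability is assumed.

```c
#include <stdbool.h>
#include <sys/mman.h>
#include <jemalloc/jemalloc.h>

static void *
my_extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	(void)extent_hooks; (void)alignment; (void)arena_ind;
	void *ret = mmap(new_addr, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ret == MAP_FAILED) {
		return NULL;
	}
	if (new_addr != NULL && ret != new_addr) {
		/* A specific address was requested but not honored; give up. */
		munmap(ret, size);
		return NULL;
	}
	*zero = true;	/* Fresh anonymous mappings are zero-filled. */
	*commit = true;	/* And already readable/writable. */
	return ret;
}
```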
| 213 | static inline bool | ||
| 214 | ehooks_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, | ||
| 215 | bool committed) { | ||
| 216 | extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); | ||
| 217 | if (extent_hooks == &ehooks_default_extent_hooks) { | ||
| 218 | return ehooks_default_dalloc_impl(addr, size); | ||
| 219 | } else if (extent_hooks->dalloc == NULL) { | ||
| 220 | return true; | ||
| 221 | } else { | ||
| 222 | ehooks_pre_reentrancy(tsdn); | ||
| 223 | bool err = extent_hooks->dalloc(extent_hooks, addr, size, | ||
| 224 | committed, ehooks_ind_get(ehooks)); | ||
| 225 | ehooks_post_reentrancy(tsdn); | ||
| 226 | return err; | ||
| 227 | } | ||
| 228 | } | ||
| 229 | |||
| 230 | static inline void | ||
| 231 | ehooks_destroy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, | ||
| 232 | bool committed) { | ||
| 233 | extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); | ||
| 234 | if (extent_hooks == &ehooks_default_extent_hooks) { | ||
| 235 | ehooks_default_destroy_impl(addr, size); | ||
| 236 | } else if (extent_hooks->destroy == NULL) { | ||
| 237 | /* Do nothing. */ | ||
| 238 | } else { | ||
| 239 | ehooks_pre_reentrancy(tsdn); | ||
| 240 | extent_hooks->destroy(extent_hooks, addr, size, committed, | ||
| 241 | ehooks_ind_get(ehooks)); | ||
| 242 | ehooks_post_reentrancy(tsdn); | ||
| 243 | } | ||
| 244 | } | ||
| 245 | |||
| 246 | static inline bool | ||
| 247 | ehooks_commit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, | ||
| 248 | size_t offset, size_t length) { | ||
| 249 | extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); | ||
| 250 | bool err; | ||
| 251 | if (extent_hooks == &ehooks_default_extent_hooks) { | ||
| 252 | err = ehooks_default_commit_impl(addr, offset, length); | ||
| 253 | } else if (extent_hooks->commit == NULL) { | ||
| 254 | err = true; | ||
| 255 | } else { | ||
| 256 | ehooks_pre_reentrancy(tsdn); | ||
| 257 | err = extent_hooks->commit(extent_hooks, addr, size, | ||
| 258 | offset, length, ehooks_ind_get(ehooks)); | ||
| 259 | ehooks_post_reentrancy(tsdn); | ||
| 260 | } | ||
| 261 | if (!err) { | ||
| 262 | ehooks_debug_zero_check(addr, size); | ||
| 263 | } | ||
| 264 | return err; | ||
| 265 | } | ||
| 266 | |||
| 267 | static inline bool | ||
| 268 | ehooks_decommit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, | ||
| 269 | size_t offset, size_t length) { | ||
| 270 | extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); | ||
| 271 | if (extent_hooks == &ehooks_default_extent_hooks) { | ||
| 272 | return ehooks_default_decommit_impl(addr, offset, length); | ||
| 273 | } else if (extent_hooks->decommit == NULL) { | ||
| 274 | return true; | ||
| 275 | } else { | ||
| 276 | ehooks_pre_reentrancy(tsdn); | ||
| 277 | bool err = extent_hooks->decommit(extent_hooks, addr, size, | ||
| 278 | offset, length, ehooks_ind_get(ehooks)); | ||
| 279 | ehooks_post_reentrancy(tsdn); | ||
| 280 | return err; | ||
| 281 | } | ||
| 282 | } | ||
| 283 | |||
| 284 | static inline bool | ||
| 285 | ehooks_purge_lazy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, | ||
| 286 | size_t offset, size_t length) { | ||
| 287 | extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); | ||
| 288 | #ifdef PAGES_CAN_PURGE_LAZY | ||
| 289 | if (extent_hooks == &ehooks_default_extent_hooks) { | ||
| 290 | return ehooks_default_purge_lazy_impl(addr, offset, length); | ||
| 291 | } | ||
| 292 | #endif | ||
| 293 | if (extent_hooks->purge_lazy == NULL) { | ||
| 294 | return true; | ||
| 295 | } else { | ||
| 296 | ehooks_pre_reentrancy(tsdn); | ||
| 297 | bool err = extent_hooks->purge_lazy(extent_hooks, addr, size, | ||
| 298 | offset, length, ehooks_ind_get(ehooks)); | ||
| 299 | ehooks_post_reentrancy(tsdn); | ||
| 300 | return err; | ||
| 301 | } | ||
| 302 | } | ||
| 303 | |||
| 304 | static inline bool | ||
| 305 | ehooks_purge_forced(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, | ||
| 306 | size_t offset, size_t length) { | ||
| 307 | extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); | ||
| 308 | /* | ||
| 309 | * It would be correct to have an ehooks_debug_zero_check call at the end | ||
| 310 | * of this function; purge_forced is required to zero. But checking | ||
| 311 | * would touch the page in question, which may have performance | ||
| 312 | * consequences (imagine the hooks are using hugepages, with a global | ||
| 313 | * zero page off). Even in debug mode, it's usually a good idea to | ||
| 314 | * avoid cases that can dramatically increase memory consumption. | ||
| 315 | */ | ||
| 316 | #ifdef PAGES_CAN_PURGE_FORCED | ||
| 317 | if (extent_hooks == &ehooks_default_extent_hooks) { | ||
| 318 | return ehooks_default_purge_forced_impl(addr, offset, length); | ||
| 319 | } | ||
| 320 | #endif | ||
| 321 | if (extent_hooks->purge_forced == NULL) { | ||
| 322 | return true; | ||
| 323 | } else { | ||
| 324 | ehooks_pre_reentrancy(tsdn); | ||
| 325 | bool err = extent_hooks->purge_forced(extent_hooks, addr, size, | ||
| 326 | offset, length, ehooks_ind_get(ehooks)); | ||
| 327 | ehooks_post_reentrancy(tsdn); | ||
| 328 | return err; | ||
| 329 | } | ||
| 330 | } | ||
| 331 | |||
| 332 | static inline bool | ||
| 333 | ehooks_split(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, | ||
| 334 | size_t size_a, size_t size_b, bool committed) { | ||
| 335 | extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); | ||
| 336 | if (ehooks_are_default(ehooks)) { | ||
| 337 | return ehooks_default_split_impl(); | ||
| 338 | } else if (extent_hooks->split == NULL) { | ||
| 339 | return true; | ||
| 340 | } else { | ||
| 341 | ehooks_pre_reentrancy(tsdn); | ||
| 342 | bool err = extent_hooks->split(extent_hooks, addr, size, size_a, | ||
| 343 | size_b, committed, ehooks_ind_get(ehooks)); | ||
| 344 | ehooks_post_reentrancy(tsdn); | ||
| 345 | return err; | ||
| 346 | } | ||
| 347 | } | ||
| 348 | |||
| 349 | static inline bool | ||
| 350 | ehooks_merge(tsdn_t *tsdn, ehooks_t *ehooks, void *addr_a, size_t size_a, | ||
| 351 | void *addr_b, size_t size_b, bool committed) { | ||
| 352 | extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); | ||
| 353 | if (extent_hooks == &ehooks_default_extent_hooks) { | ||
| 354 | return ehooks_default_merge_impl(tsdn, addr_a, addr_b); | ||
| 355 | } else if (extent_hooks->merge == NULL) { | ||
| 356 | return true; | ||
| 357 | } else { | ||
| 358 | ehooks_pre_reentrancy(tsdn); | ||
| 359 | bool err = extent_hooks->merge(extent_hooks, addr_a, size_a, | ||
| 360 | addr_b, size_b, committed, ehooks_ind_get(ehooks)); | ||
| 361 | ehooks_post_reentrancy(tsdn); | ||
| 362 | return err; | ||
| 363 | } | ||
| 364 | } | ||
| 365 | |||
| 366 | static inline void | ||
| 367 | ehooks_zero(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size) { | ||
| 368 | extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); | ||
| 369 | if (extent_hooks == &ehooks_default_extent_hooks) { | ||
| 370 | ehooks_default_zero_impl(addr, size); | ||
| 371 | } else { | ||
| 372 | /* | ||
| 373 | * It would be correct to try using the user-provided purge | ||
| 374 | * hooks (since they are required to have zeroed the extent if | ||
| 375 | * they indicate success), but we don't necessarily know their | ||
| 376 | * cost. We'll be conservative and use memset. | ||
| 377 | */ | ||
| 378 | memset(addr, 0, size); | ||
| 379 | } | ||
| 380 | } | ||
| 381 | |||
| 382 | static inline bool | ||
| 383 | ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) { | ||
| 384 | bool err; | ||
| 385 | extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); | ||
| 386 | |||
| 387 | if (extent_hooks == &ehooks_default_extent_hooks) { | ||
| 388 | ehooks_default_guard_impl(guard1, guard2); | ||
| 389 | err = false; | ||
| 390 | } else { | ||
| 391 | err = true; | ||
| 392 | } | ||
| 393 | |||
| 394 | return err; | ||
| 395 | } | ||
| 396 | |||
| 397 | static inline bool | ||
| 398 | ehooks_unguard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) { | ||
| 399 | bool err; | ||
| 400 | extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); | ||
| 401 | |||
| 402 | if (extent_hooks == &ehooks_default_extent_hooks) { | ||
| 403 | ehooks_default_unguard_impl(guard1, guard2); | ||
| 404 | err = false; | ||
| 405 | } else { | ||
| 406 | err = true; | ||
| 407 | } | ||
| 408 | |||
| 409 | return err; | ||
| 410 | } | ||
| 411 | |||
| 412 | #endif /* JEMALLOC_INTERNAL_EHOOKS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/emap.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/emap.h deleted file mode 100644 index 847af32..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/emap.h +++ /dev/null | |||
| @@ -1,357 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_EMAP_H | ||
| 2 | #define JEMALLOC_INTERNAL_EMAP_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/base.h" | ||
| 5 | #include "jemalloc/internal/rtree.h" | ||
| 6 | |||
| 7 | /* | ||
| 8 | * Note: Ends without a semicolon, so that | ||
| 9 | * EMAP_DECLARE_RTREE_CTX; | ||
| 10 | * in uses will avoid empty-statement warnings. | ||
| 11 | */ | ||
| 12 | #define EMAP_DECLARE_RTREE_CTX \ | ||
| 13 | rtree_ctx_t rtree_ctx_fallback; \ | ||
| 14 | rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback) | ||
| 15 | |||
| 16 | typedef struct emap_s emap_t; | ||
| 17 | struct emap_s { | ||
| 18 | rtree_t rtree; | ||
| 19 | }; | ||
| 20 | |||
| 21 | /* Used to pass rtree lookup context down the path. */ | ||
| 22 | typedef struct emap_alloc_ctx_t emap_alloc_ctx_t; | ||
| 23 | struct emap_alloc_ctx_t { | ||
| 24 | szind_t szind; | ||
| 25 | bool slab; | ||
| 26 | }; | ||
| 27 | |||
| 28 | typedef struct emap_full_alloc_ctx_s emap_full_alloc_ctx_t; | ||
| 29 | struct emap_full_alloc_ctx_s { | ||
| 30 | szind_t szind; | ||
| 31 | bool slab; | ||
| 32 | edata_t *edata; | ||
| 33 | }; | ||
| 34 | |||
| 35 | bool emap_init(emap_t *emap, base_t *base, bool zeroed); | ||
| 36 | |||
| 37 | void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, | ||
| 38 | bool slab); | ||
| 39 | |||
| 40 | void emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata, | ||
| 41 | extent_state_t state); | ||
| 42 | |||
| 43 | /* | ||
| 44 | * The two acquire functions below allow accessing neighbor edatas, if it's safe | ||
| 45 | * and valid to do so (i.e. from the same arena, of the same state, etc.). This | ||
| 46 | * is necessary because the ecache locks are state based, and only protect | ||
| 47 | * edatas with the same state. Therefore the neighbor edata's state needs to be | ||
| 48 | * verified first, before chasing the edata pointer. The returned edata will be | ||
| 49 | * in an acquired state, meaning other threads will be prevented from accessing | ||
| 50 | * it, even if technically the edata can still be discovered from the rtree. | ||
| 51 | * | ||
| 52 | * This means, at any moment when holding pointers to edata, either one of the | ||
| 53 | * state based locks is held (and the edatas are all of the protected state), or | ||
| 54 | * the edatas are in an acquired state (e.g. in active or merging state). The | ||
| 55 | * acquire operation itself (changing the edata to an acquired state) is done | ||
| 56 | * under the state locks. | ||
| 57 | */ | ||
| 58 | edata_t *emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap, | ||
| 59 | edata_t *edata, extent_pai_t pai, extent_state_t expected_state, | ||
| 60 | bool forward); | ||
| 61 | edata_t *emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap, | ||
| 62 | edata_t *edata, extent_pai_t pai, extent_state_t expected_state); | ||
| 63 | void emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata, | ||
| 64 | extent_state_t new_state); | ||
| 65 | |||
| 66 | /* | ||
| 67 | * Associate the given edata with its beginning and end address, setting the | ||
| 68 | * szind and slab info appropriately. | ||
| 69 | * Returns true on error (i.e. resource exhaustion). | ||
| 70 | */ | ||
| 71 | bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata, | ||
| 72 | szind_t szind, bool slab); | ||
| 73 | |||
| 74 | /* | ||
| 75 | * Does the same thing, but with the interior of the range, for slab | ||
| 76 | * allocations. | ||
| 77 | * | ||
| 78 | * You might wonder why we don't just have a single emap_register function that | ||
| 79 | * does both depending on the value of 'slab'. The answer is twofold: | ||
| 80 | * - As a practical matter, in places like the extract->split->commit pathway, | ||
| 81 | * we defer the interior operation until we're sure that the commit won't fail | ||
| 82 | * (but we have to register the split boundaries there). | ||
| 83 | * - In general, we're trying to move to a world where the page-specific | ||
| 84 | * allocator doesn't know as much about how the pages it allocates will be | ||
| 85 | * used, and passing a 'slab' parameter everywhere makes that more | ||
| 86 | * complicated. | ||
| 87 | * | ||
| 88 | * Unlike the boundary version, this function can't fail; this is because slabs | ||
| 89 | * can't get big enough to touch a new page that neither of the boundaries | ||
| 90 | * touched, so no allocation is necessary to fill the interior once the boundary | ||
| 91 | * has been touched. | ||
| 92 | */ | ||
| 93 | void emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata, | ||
| 94 | szind_t szind); | ||
| 95 | |||
| 96 | void emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata); | ||
| 97 | void emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata); | ||
| 98 | |||
| 99 | typedef struct emap_prepare_s emap_prepare_t; | ||
| 100 | struct emap_prepare_s { | ||
| 101 | rtree_leaf_elm_t *lead_elm_a; | ||
| 102 | rtree_leaf_elm_t *lead_elm_b; | ||
| 103 | rtree_leaf_elm_t *trail_elm_a; | ||
| 104 | rtree_leaf_elm_t *trail_elm_b; | ||
| 105 | }; | ||
| 106 | |||
| 107 | /** | ||
| 108 | * These functions do the emap metadata management for merging, splitting, and | ||
| 109 | * reusing extents. In particular, they set the boundary mappings from | ||
| 110 | * addresses to edatas. If the result is going to be used as a slab, you | ||
| 111 | * still need to call emap_register_interior on it, though. | ||
| 112 | * | ||
| 113 | * Remap simply changes the szind and slab status of an extent's boundary | ||
| 114 | * mappings. If the extent is not a slab, it doesn't bother with updating the | ||
| 115 | * end mapping (since lookups only occur in the interior of an extent for | ||
| 116 | * slabs). Since the szind and slab status only make sense for active extents, | ||
| 117 | * this should only be called while activating or deactivating an extent. | ||
| 118 | * | ||
| 119 | * Split and merge have a "prepare" and a "commit" portion. The prepare portion | ||
| 120 | * does the operations that can be done without exclusive access to the extent | ||
| 121 | * in question, while the commit variant requires exclusive access to maintain | ||
| 122 | * the emap invariants. The only function that can fail is emap_split_prepare, | ||
| 123 | * and it returns true on failure (at which point the caller shouldn't commit). | ||
| 124 | * | ||
| 125 | * In all cases, "lead" refers to the lower-addressed extent, and trail to the | ||
| 126 | * higher-addressed one. It's the caller's responsibility to set the edata | ||
| 127 | * state appropriately. | ||
| 128 | */ | ||
| 129 | bool emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare, | ||
| 130 | edata_t *edata, size_t size_a, edata_t *trail, size_t size_b); | ||
| 131 | void emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare, | ||
| 132 | edata_t *lead, size_t size_a, edata_t *trail, size_t size_b); | ||
| 133 | void emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare, | ||
| 134 | edata_t *lead, edata_t *trail); | ||
| 135 | void emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare, | ||
| 136 | edata_t *lead, edata_t *trail); | ||
| 137 | |||
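A schematic of the split call order described above, assuming the jemalloc-internal environment (the wrapper name is hypothetical and all state/lock management is elided):

```c
static bool
emap_split_sketch(tsdn_t *tsdn, emap_t *emap, edata_t *lead, size_t size_a,
    edata_t *trail, size_t size_b) {
	emap_prepare_t prepare;
	if (emap_split_prepare(tsdn, emap, &prepare, lead, size_a, trail,
	    size_b)) {
		return true;	/* Prepare failed; nothing to roll back. */
	}
	/* ... acquire exclusive access to the extent in question ... */
	emap_split_commit(tsdn, emap, &prepare, lead, size_a, trail, size_b);
	return false;
}
```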
| 138 | /* Assert that the emap's view of the given edata matches the edata's view. */ | ||
| 139 | void emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata); | ||
| 140 | static inline void | ||
| 141 | emap_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { | ||
| 142 | if (config_debug) { | ||
| 143 | emap_do_assert_mapped(tsdn, emap, edata); | ||
| 144 | } | ||
| 145 | } | ||
| 146 | |||
| 147 | /* Assert that the given edata isn't in the map. */ | ||
| 148 | void emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata); | ||
| 149 | static inline void | ||
| 150 | emap_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { | ||
| 151 | if (config_debug) { | ||
| 152 | emap_do_assert_not_mapped(tsdn, emap, edata); | ||
| 153 | } | ||
| 154 | } | ||
| 155 | |||
| 156 | JEMALLOC_ALWAYS_INLINE bool | ||
| 157 | emap_edata_in_transition(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { | ||
| 158 | assert(config_debug); | ||
| 159 | emap_assert_mapped(tsdn, emap, edata); | ||
| 160 | |||
| 161 | EMAP_DECLARE_RTREE_CTX; | ||
| 162 | rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx, | ||
| 163 | (uintptr_t)edata_base_get(edata)); | ||
| 164 | |||
| 165 | return edata_state_in_transition(contents.metadata.state); | ||
| 166 | } | ||
| 167 | |||
| 168 | JEMALLOC_ALWAYS_INLINE bool | ||
| 169 | emap_edata_is_acquired(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { | ||
| 170 | if (!config_debug) { | ||
| 171 | /* For assertions only. */ | ||
| 172 | return false; | ||
| 173 | } | ||
| 174 | |||
| 175 | /* | ||
| 176 | * The edata is considered acquired if no other threads will attempt to | ||
| 177 | * read / write any fields from it. This includes a few cases: | ||
| 178 | * | ||
| 179 | * 1) edata not hooked into emap yet -- This implies the edata just got | ||
| 180 | * allocated or initialized. | ||
| 181 | * | ||
| 182 | * 2) in an active or transition state -- In both cases, the edata can | ||
| 183 | * be discovered from the emap, however the state tracked in the rtree | ||
| 184 | * will prevent other threads from accessing the actual edata. | ||
| 185 | */ | ||
| 186 | EMAP_DECLARE_RTREE_CTX; | ||
| 187 | rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree, | ||
| 188 | rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true, | ||
| 189 | /* init_missing */ false); | ||
| 190 | if (elm == NULL) { | ||
| 191 | return true; | ||
| 192 | } | ||
| 193 | rtree_contents_t contents = rtree_leaf_elm_read(tsdn, &emap->rtree, elm, | ||
| 194 | /* dependent */ true); | ||
| 195 | if (contents.edata == NULL || | ||
| 196 | contents.metadata.state == extent_state_active || | ||
| 197 | edata_state_in_transition(contents.metadata.state)) { | ||
| 198 | return true; | ||
| 199 | } | ||
| 200 | |||
| 201 | return false; | ||
| 202 | } | ||
| 203 | |||
| 204 | JEMALLOC_ALWAYS_INLINE void | ||
| 205 | extent_assert_can_coalesce(const edata_t *inner, const edata_t *outer) { | ||
| 206 | assert(edata_arena_ind_get(inner) == edata_arena_ind_get(outer)); | ||
| 207 | assert(edata_pai_get(inner) == edata_pai_get(outer)); | ||
| 208 | assert(edata_committed_get(inner) == edata_committed_get(outer)); | ||
| 209 | assert(edata_state_get(inner) == extent_state_active); | ||
| 210 | assert(edata_state_get(outer) == extent_state_merging); | ||
| 211 | assert(!edata_guarded_get(inner) && !edata_guarded_get(outer)); | ||
| 212 | assert(edata_base_get(inner) == edata_past_get(outer) || | ||
| 213 | edata_base_get(outer) == edata_past_get(inner)); | ||
| 214 | } | ||
| 215 | |||
| 216 | JEMALLOC_ALWAYS_INLINE void | ||
| 217 | extent_assert_can_expand(const edata_t *original, const edata_t *expand) { | ||
| 218 | assert(edata_arena_ind_get(original) == edata_arena_ind_get(expand)); | ||
| 219 | assert(edata_pai_get(original) == edata_pai_get(expand)); | ||
| 220 | assert(edata_state_get(original) == extent_state_active); | ||
| 221 | assert(edata_state_get(expand) == extent_state_merging); | ||
| 222 | assert(edata_past_get(original) == edata_base_get(expand)); | ||
| 223 | } | ||
| 224 | |||
| 225 | JEMALLOC_ALWAYS_INLINE edata_t * | ||
| 226 | emap_edata_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) { | ||
| 227 | EMAP_DECLARE_RTREE_CTX; | ||
| 228 | |||
| 229 | return rtree_read(tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr).edata; | ||
| 230 | } | ||
| 231 | |||
| 232 | /* Fills in alloc_ctx with the info in the map. */ | ||
| 233 | JEMALLOC_ALWAYS_INLINE void | ||
| 234 | emap_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr, | ||
| 235 | emap_alloc_ctx_t *alloc_ctx) { | ||
| 236 | EMAP_DECLARE_RTREE_CTX; | ||
| 237 | |||
| 238 | rtree_metadata_t metadata = rtree_metadata_read(tsdn, &emap->rtree, | ||
| 239 | rtree_ctx, (uintptr_t)ptr); | ||
| 240 | alloc_ctx->szind = metadata.szind; | ||
| 241 | alloc_ctx->slab = metadata.slab; | ||
| 242 | } | ||
| 243 | |||
| 244 | /* The pointer must be mapped. */ | ||
| 245 | JEMALLOC_ALWAYS_INLINE void | ||
| 246 | emap_full_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr, | ||
| 247 | emap_full_alloc_ctx_t *full_alloc_ctx) { | ||
| 248 | EMAP_DECLARE_RTREE_CTX; | ||
| 249 | |||
| 250 | rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx, | ||
| 251 | (uintptr_t)ptr); | ||
| 252 | full_alloc_ctx->edata = contents.edata; | ||
| 253 | full_alloc_ctx->szind = contents.metadata.szind; | ||
| 254 | full_alloc_ctx->slab = contents.metadata.slab; | ||
| 255 | } | ||
| 256 | |||
| 257 | /* | ||
| 258 | * The pointer is allowed to not be mapped. | ||
| 259 | * | ||
| 260 | * Returns true when the pointer is not present. | ||
| 261 | */ | ||
| 262 | JEMALLOC_ALWAYS_INLINE bool | ||
| 263 | emap_full_alloc_ctx_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr, | ||
| 264 | emap_full_alloc_ctx_t *full_alloc_ctx) { | ||
| 265 | EMAP_DECLARE_RTREE_CTX; | ||
| 266 | |||
| 267 | rtree_contents_t contents; | ||
| 268 | bool err = rtree_read_independent(tsdn, &emap->rtree, rtree_ctx, | ||
| 269 | (uintptr_t)ptr, &contents); | ||
| 270 | if (err) { | ||
| 271 | return true; | ||
| 272 | } | ||
| 273 | full_alloc_ctx->edata = contents.edata; | ||
| 274 | full_alloc_ctx->szind = contents.metadata.szind; | ||
| 275 | full_alloc_ctx->slab = contents.metadata.slab; | ||
| 276 | return false; | ||
| 277 | } | ||
| 278 | |||
| 279 | /* | ||
| 280 | * Only used on the fast path of free. Returns true when the lookup cannot be | ||
| 281 | * fulfilled by the fast path, e.g. when the metadata key is not cached. | ||
| 282 | */ | ||
| 283 | JEMALLOC_ALWAYS_INLINE bool | ||
| 284 | emap_alloc_ctx_try_lookup_fast(tsd_t *tsd, emap_t *emap, const void *ptr, | ||
| 285 | emap_alloc_ctx_t *alloc_ctx) { | ||
| 286 | /* Use the unsafe getter since this may get called during exit. */ | ||
| 287 | rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get_unsafe(tsd); | ||
| 288 | |||
| 289 | rtree_metadata_t metadata; | ||
| 290 | bool err = rtree_metadata_try_read_fast(tsd_tsdn(tsd), &emap->rtree, | ||
| 291 | rtree_ctx, (uintptr_t)ptr, &metadata); | ||
| 292 | if (err) { | ||
| 293 | return true; | ||
| 294 | } | ||
| 295 | alloc_ctx->szind = metadata.szind; | ||
| 296 | alloc_ctx->slab = metadata.slab; | ||
| 297 | return false; | ||
| 298 | } | ||
| 299 | |||
| 300 | /* | ||
| 301 | * We want to do batch lookups out of the cache bins, which use | ||
| 302 | * cache_bin_ptr_array_get to access the i'th element of the bin (since they | ||
| 303 | * invert the usual ordering in deciding what to flush). This lets the emap avoid | ||
| 304 | * caring about its caller's ordering. | ||
| 305 | */ | ||
| 306 | typedef const void *(*emap_ptr_getter)(void *ctx, size_t ind); | ||
| 307 | /* | ||
| 308 | * This allows size-checking assertions, which we can only do while we're in the | ||
| 309 | * process of edata lookups. | ||
| 310 | */ | ||
| 311 | typedef void (*emap_metadata_visitor)(void *ctx, emap_full_alloc_ctx_t *alloc_ctx); | ||
| 312 | |||
| 313 | typedef union emap_batch_lookup_result_u emap_batch_lookup_result_t; | ||
| 314 | union emap_batch_lookup_result_u { | ||
| 315 | edata_t *edata; | ||
| 316 | rtree_leaf_elm_t *rtree_leaf; | ||
| 317 | }; | ||
| 318 | |||
| 319 | JEMALLOC_ALWAYS_INLINE void | ||
| 320 | emap_edata_lookup_batch(tsd_t *tsd, emap_t *emap, size_t nptrs, | ||
| 321 | emap_ptr_getter ptr_getter, void *ptr_getter_ctx, | ||
| 322 | emap_metadata_visitor metadata_visitor, void *metadata_visitor_ctx, | ||
| 323 | emap_batch_lookup_result_t *result) { | ||
| 324 | /* Avoids null-checking tsdn in the loop below. */ | ||
| 325 | util_assume(tsd != NULL); | ||
| 326 | rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get(tsd); | ||
| 327 | |||
| 328 | for (size_t i = 0; i < nptrs; i++) { | ||
| 329 | const void *ptr = ptr_getter(ptr_getter_ctx, i); | ||
| 330 | /* | ||
| 331 | * Reuse the edatas array as a temp buffer, lying a little about | ||
| 332 | * the types. | ||
| 333 | */ | ||
| 334 | result[i].rtree_leaf = rtree_leaf_elm_lookup(tsd_tsdn(tsd), | ||
| 335 | &emap->rtree, rtree_ctx, (uintptr_t)ptr, | ||
| 336 | /* dependent */ true, /* init_missing */ false); | ||
| 337 | } | ||
| 338 | |||
| 339 | for (size_t i = 0; i < nptrs; i++) { | ||
| 340 | rtree_leaf_elm_t *elm = result[i].rtree_leaf; | ||
| 341 | rtree_contents_t contents = rtree_leaf_elm_read(tsd_tsdn(tsd), | ||
| 342 | &emap->rtree, elm, /* dependent */ true); | ||
| 343 | result[i].edata = contents.edata; | ||
| 344 | emap_full_alloc_ctx_t alloc_ctx; | ||
| 345 | /* | ||
| 346 | * Not all these fields are read in practice by the metadata | ||
| 347 | * visitor. But the compiler can easily optimize away the ones | ||
| 348 | * that aren't, so no sense in being incomplete. | ||
| 349 | */ | ||
| 350 | alloc_ctx.szind = contents.metadata.szind; | ||
| 351 | alloc_ctx.slab = contents.metadata.slab; | ||
| 352 | alloc_ctx.edata = contents.edata; | ||
| 353 | metadata_visitor(metadata_visitor_ctx, &alloc_ctx); | ||
| 354 | } | ||
| 355 | } | ||
| 356 | |||
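As a concrete illustration of the getter/visitor indirection, a caller holding a plain pointer array might supply thunks like the following (hypothetical names; assumes the jemalloc-internal environment):

```c
static const void *
array_ptr_getter(void *ctx, size_t ind) {
	return ((const void **)ctx)[ind];
}

static void
assert_mapped_visitor(void *ctx, emap_full_alloc_ctx_t *alloc_ctx) {
	(void)ctx;
	assert(alloc_ctx->edata != NULL);	/* Stand-in for real size checks. */
}

/*
 * Usage, given nptrs pointers in ptrs[] and a result[] buffer of the same
 * length:
 *
 *   emap_edata_lookup_batch(tsd, emap, nptrs, array_ptr_getter, ptrs,
 *       assert_mapped_visitor, NULL, result);
 */
```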
| 357 | #endif /* JEMALLOC_INTERNAL_EMAP_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/emitter.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/emitter.h deleted file mode 100644 index 9482f68..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/emitter.h +++ /dev/null | |||
| @@ -1,510 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_EMITTER_H | ||
| 2 | #define JEMALLOC_INTERNAL_EMITTER_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/ql.h" | ||
| 5 | |||
| 6 | typedef enum emitter_output_e emitter_output_t; | ||
| 7 | enum emitter_output_e { | ||
| 8 | emitter_output_json, | ||
| 9 | emitter_output_json_compact, | ||
| 10 | emitter_output_table | ||
| 11 | }; | ||
| 12 | |||
| 13 | typedef enum emitter_justify_e emitter_justify_t; | ||
| 14 | enum emitter_justify_e { | ||
| 15 | emitter_justify_left, | ||
| 16 | emitter_justify_right, | ||
| 17 | /* Not for users; just to pass to internal functions. */ | ||
| 18 | emitter_justify_none | ||
| 19 | }; | ||
| 20 | |||
| 21 | typedef enum emitter_type_e emitter_type_t; | ||
| 22 | enum emitter_type_e { | ||
| 23 | emitter_type_bool, | ||
| 24 | emitter_type_int, | ||
| 25 | emitter_type_int64, | ||
| 26 | emitter_type_unsigned, | ||
| 27 | emitter_type_uint32, | ||
| 28 | emitter_type_uint64, | ||
| 29 | emitter_type_size, | ||
| 30 | emitter_type_ssize, | ||
| 31 | emitter_type_string, | ||
| 32 | /* | ||
| 33 | * A title is a column title in a table; it's just a string, but it's | ||
| 34 | * not quoted. | ||
| 35 | */ | ||
| 36 | emitter_type_title, | ||
| 37 | }; | ||
| 38 | |||
| 39 | typedef struct emitter_col_s emitter_col_t; | ||
| 40 | struct emitter_col_s { | ||
| 41 | /* Filled in by the user. */ | ||
| 42 | emitter_justify_t justify; | ||
| 43 | int width; | ||
| 44 | emitter_type_t type; | ||
| 45 | union { | ||
| 46 | bool bool_val; | ||
| 47 | int int_val; | ||
| 48 | unsigned unsigned_val; | ||
| 49 | uint32_t uint32_val; | ||
| 50 | uint32_t uint32_t_val; | ||
| 51 | uint64_t uint64_val; | ||
| 52 | uint64_t uint64_t_val; | ||
| 53 | size_t size_val; | ||
| 54 | ssize_t ssize_val; | ||
| 55 | const char *str_val; | ||
| 56 | }; | ||
| 57 | |||
| 58 | /* Filled in by initialization. */ | ||
| 59 | ql_elm(emitter_col_t) link; | ||
| 60 | }; | ||
| 61 | |||
| 62 | typedef struct emitter_row_s emitter_row_t; | ||
| 63 | struct emitter_row_s { | ||
| 64 | ql_head(emitter_col_t) cols; | ||
| 65 | }; | ||
| 66 | |||
| 67 | typedef struct emitter_s emitter_t; | ||
| 68 | struct emitter_s { | ||
| 69 | emitter_output_t output; | ||
| 70 | /* The output information. */ | ||
| 71 | write_cb_t *write_cb; | ||
| 72 | void *cbopaque; | ||
| 73 | int nesting_depth; | ||
| 74 | /* True if we've already emitted a value at the given depth. */ | ||
| 75 | bool item_at_depth; | ||
| 76 | /* True if we emitted a key and will emit corresponding value next. */ | ||
| 77 | bool emitted_key; | ||
| 78 | }; | ||
| 79 | |||
| 80 | static inline bool | ||
| 81 | emitter_outputs_json(emitter_t *emitter) { | ||
| 82 | return emitter->output == emitter_output_json || | ||
| 83 | emitter->output == emitter_output_json_compact; | ||
| 84 | } | ||
| 85 | |||
| 86 | /* Internal convenience function. Write to the emitter the given string. */ | ||
| 87 | JEMALLOC_FORMAT_PRINTF(2, 3) | ||
| 88 | static inline void | ||
| 89 | emitter_printf(emitter_t *emitter, const char *format, ...) { | ||
| 90 | va_list ap; | ||
| 91 | |||
| 92 | va_start(ap, format); | ||
| 93 | malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap); | ||
| 94 | va_end(ap); | ||
| 95 | } | ||
| 96 | |||
| 97 | static inline const char * JEMALLOC_FORMAT_ARG(3) | ||
| 98 | emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier, | ||
| 99 | emitter_justify_t justify, int width) { | ||
| 100 | size_t written; | ||
| 101 | fmt_specifier++; | ||
| 102 | if (justify == emitter_justify_none) { | ||
| 103 | written = malloc_snprintf(out_fmt, out_size, | ||
| 104 | "%%%s", fmt_specifier); | ||
| 105 | } else if (justify == emitter_justify_left) { | ||
| 106 | written = malloc_snprintf(out_fmt, out_size, | ||
| 107 | "%%-%d%s", width, fmt_specifier); | ||
| 108 | } else { | ||
| 109 | written = malloc_snprintf(out_fmt, out_size, | ||
| 110 | "%%%d%s", width, fmt_specifier); | ||
| 111 | } | ||
| 112 | /* Only happens in case of bad format string, which *we* choose. */ | ||
| 113 | assert(written < out_size); | ||
| 114 | return out_fmt; | ||
| 115 | } | ||
| 116 | |||
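For example, with right justification and width 12, `"%zu"` becomes `"%12zu"`; left justification yields `"%-12zu"`; and `emitter_justify_none` reproduces plain `"%zu"` with the width ignored.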
| 117 | /* | ||
| 118 | * Internal. Emit the given value type in the relevant encoding (so that the | ||
| 119 | * bool true gets mapped to json "true", but the string "true" gets mapped to | ||
| 120 | * json "\"true\"", for instance). | ||
| 121 | * | ||
| 122 | * Width is ignored if justify is emitter_justify_none. | ||
| 123 | */ | ||
| 124 | static inline void | ||
| 125 | emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width, | ||
| 126 | emitter_type_t value_type, const void *value) { | ||
| 127 | size_t str_written; | ||
| 128 | #define BUF_SIZE 256 | ||
| 129 | #define FMT_SIZE 10 | ||
| 130 | /* | ||
| 131 | * We dynamically generate a format string to emit, to let us use the | ||
| 132 | * snprintf machinery. This is kinda hacky, but gets the job done | ||
| 133 | * quickly without having to think about the various snprintf edge | ||
| 134 | * cases. | ||
| 135 | */ | ||
| 136 | char fmt[FMT_SIZE]; | ||
| 137 | char buf[BUF_SIZE]; | ||
| 138 | |||
| 139 | #define EMIT_SIMPLE(type, format) \ | ||
| 140 | emitter_printf(emitter, \ | ||
| 141 | emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width), \ | ||
| 142 | *(const type *)value); | ||
| 143 | |||
| 144 | switch (value_type) { | ||
| 145 | case emitter_type_bool: | ||
| 146 | emitter_printf(emitter, | ||
| 147 | emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), | ||
| 148 | *(const bool *)value ? "true" : "false"); | ||
| 149 | break; | ||
| 150 | case emitter_type_int: | ||
| 151 | EMIT_SIMPLE(int, "%d") | ||
| 152 | break; | ||
| 153 | case emitter_type_int64: | ||
| 154 | EMIT_SIMPLE(int64_t, "%" FMTd64) | ||
| 155 | break; | ||
| 156 | case emitter_type_unsigned: | ||
| 157 | EMIT_SIMPLE(unsigned, "%u") | ||
| 158 | break; | ||
| 159 | case emitter_type_ssize: | ||
| 160 | EMIT_SIMPLE(ssize_t, "%zd") | ||
| 161 | break; | ||
| 162 | case emitter_type_size: | ||
| 163 | EMIT_SIMPLE(size_t, "%zu") | ||
| 164 | break; | ||
| 165 | case emitter_type_string: | ||
| 166 | str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"", | ||
| 167 | *(const char *const *)value); | ||
| 168 | /* | ||
| 169 | * We control the strings we output; we shouldn't get anything | ||
| 170 | * anywhere near the fmt size. | ||
| 171 | */ | ||
| 172 | assert(str_written < BUF_SIZE); | ||
| 173 | emitter_printf(emitter, | ||
| 174 | emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), buf); | ||
| 175 | break; | ||
| 176 | case emitter_type_uint32: | ||
| 177 | EMIT_SIMPLE(uint32_t, "%" FMTu32) | ||
| 178 | break; | ||
| 179 | case emitter_type_uint64: | ||
| 180 | EMIT_SIMPLE(uint64_t, "%" FMTu64) | ||
| 181 | break; | ||
| 182 | case emitter_type_title: | ||
| 183 | EMIT_SIMPLE(char *const, "%s"); | ||
| 184 | break; | ||
| 185 | default: | ||
| 186 | unreachable(); | ||
| 187 | } | ||
| 188 | #undef BUF_SIZE | ||
| 189 | #undef FMT_SIZE | ||
| 190 | } | ||
| 191 | |||
| 192 | |||
| 193 | /* Internal functions. In json mode, tracks nesting state. */ | ||
| 194 | static inline void | ||
| 195 | emitter_nest_inc(emitter_t *emitter) { | ||
| 196 | emitter->nesting_depth++; | ||
| 197 | emitter->item_at_depth = false; | ||
| 198 | } | ||
| 199 | |||
| 200 | static inline void | ||
| 201 | emitter_nest_dec(emitter_t *emitter) { | ||
| 202 | emitter->nesting_depth--; | ||
| 203 | emitter->item_at_depth = true; | ||
| 204 | } | ||
| 205 | |||
| 206 | static inline void | ||
| 207 | emitter_indent(emitter_t *emitter) { | ||
| 208 | int amount = emitter->nesting_depth; | ||
| 209 | const char *indent_str; | ||
| 210 | assert(emitter->output != emitter_output_json_compact); | ||
| 211 | if (emitter->output == emitter_output_json) { | ||
| 212 | indent_str = "\t"; | ||
| 213 | } else { | ||
| 214 | amount *= 2; | ||
| 215 | indent_str = " "; | ||
| 216 | } | ||
| 217 | for (int i = 0; i < amount; i++) { | ||
| 218 | emitter_printf(emitter, "%s", indent_str); | ||
| 219 | } | ||
| 220 | } | ||
| 221 | |||
| 222 | static inline void | ||
| 223 | emitter_json_key_prefix(emitter_t *emitter) { | ||
| 224 | assert(emitter_outputs_json(emitter)); | ||
| 225 | if (emitter->emitted_key) { | ||
| 226 | emitter->emitted_key = false; | ||
| 227 | return; | ||
| 228 | } | ||
| 229 | if (emitter->item_at_depth) { | ||
| 230 | emitter_printf(emitter, ","); | ||
| 231 | } | ||
| 232 | if (emitter->output != emitter_output_json_compact) { | ||
| 233 | emitter_printf(emitter, "\n"); | ||
| 234 | emitter_indent(emitter); | ||
| 235 | } | ||
| 236 | } | ||
| 237 | |||
| 238 | /******************************************************************************/ | ||
| 239 | /* Public functions for emitter_t. */ | ||
| 240 | |||
| 241 | static inline void | ||
| 242 | emitter_init(emitter_t *emitter, emitter_output_t emitter_output, | ||
| 243 | write_cb_t *write_cb, void *cbopaque) { | ||
| 244 | emitter->output = emitter_output; | ||
| 245 | emitter->write_cb = write_cb; | ||
| 246 | emitter->cbopaque = cbopaque; | ||
| 247 | emitter->item_at_depth = false; | ||
| 248 | emitter->emitted_key = false; | ||
| 249 | emitter->nesting_depth = 0; | ||
| 250 | } | ||
| 251 | |||
| 252 | /******************************************************************************/ | ||
| 253 | /* JSON public API. */ | ||
| 254 | |||
| 255 | /* | ||
| 256 | * Emits a key (e.g. as it appears in an object). The next json entity emitted will | ||
| 257 | * be the corresponding value. | ||
| 258 | */ | ||
| 259 | static inline void | ||
| 260 | emitter_json_key(emitter_t *emitter, const char *json_key) { | ||
| 261 | if (emitter_outputs_json(emitter)) { | ||
| 262 | emitter_json_key_prefix(emitter); | ||
| 263 | emitter_printf(emitter, "\"%s\":%s", json_key, | ||
| 264 | emitter->output == emitter_output_json_compact ? "" : " "); | ||
| 265 | emitter->emitted_key = true; | ||
| 266 | } | ||
| 267 | } | ||
| 268 | |||
| 269 | static inline void | ||
| 270 | emitter_json_value(emitter_t *emitter, emitter_type_t value_type, | ||
| 271 | const void *value) { | ||
| 272 | if (emitter_outputs_json(emitter)) { | ||
| 273 | emitter_json_key_prefix(emitter); | ||
| 274 | emitter_print_value(emitter, emitter_justify_none, -1, | ||
| 275 | value_type, value); | ||
| 276 | emitter->item_at_depth = true; | ||
| 277 | } | ||
| 278 | } | ||
| 279 | |||
| 280 | /* Shorthand for calling emitter_json_key and then emitter_json_value. */ | ||
| 281 | static inline void | ||
| 282 | emitter_json_kv(emitter_t *emitter, const char *json_key, | ||
| 283 | emitter_type_t value_type, const void *value) { | ||
| 284 | emitter_json_key(emitter, json_key); | ||
| 285 | emitter_json_value(emitter, value_type, value); | ||
| 286 | } | ||
| 287 | |||
| 288 | static inline void | ||
| 289 | emitter_json_array_begin(emitter_t *emitter) { | ||
| 290 | if (emitter_outputs_json(emitter)) { | ||
| 291 | emitter_json_key_prefix(emitter); | ||
| 292 | emitter_printf(emitter, "["); | ||
| 293 | emitter_nest_inc(emitter); | ||
| 294 | } | ||
| 295 | } | ||
| 296 | |||
| 297 | /* Shorthand for calling emitter_json_key and then emitter_json_array_begin. */ | ||
| 298 | static inline void | ||
| 299 | emitter_json_array_kv_begin(emitter_t *emitter, const char *json_key) { | ||
| 300 | emitter_json_key(emitter, json_key); | ||
| 301 | emitter_json_array_begin(emitter); | ||
| 302 | } | ||
| 303 | |||
| 304 | static inline void | ||
| 305 | emitter_json_array_end(emitter_t *emitter) { | ||
| 306 | if (emitter_outputs_json(emitter)) { | ||
| 307 | assert(emitter->nesting_depth > 0); | ||
| 308 | emitter_nest_dec(emitter); | ||
| 309 | if (emitter->output != emitter_output_json_compact) { | ||
| 310 | emitter_printf(emitter, "\n"); | ||
| 311 | emitter_indent(emitter); | ||
| 312 | } | ||
| 313 | emitter_printf(emitter, "]"); | ||
| 314 | } | ||
| 315 | } | ||
| 316 | |||
| 317 | static inline void | ||
| 318 | emitter_json_object_begin(emitter_t *emitter) { | ||
| 319 | if (emitter_outputs_json(emitter)) { | ||
| 320 | emitter_json_key_prefix(emitter); | ||
| 321 | emitter_printf(emitter, "{"); | ||
| 322 | emitter_nest_inc(emitter); | ||
| 323 | } | ||
| 324 | } | ||
| 325 | |||
| 326 | /* Shorthand for calling emitter_json_key and then emitter_json_object_begin. */ | ||
| 327 | static inline void | ||
| 328 | emitter_json_object_kv_begin(emitter_t *emitter, const char *json_key) { | ||
| 329 | emitter_json_key(emitter, json_key); | ||
| 330 | emitter_json_object_begin(emitter); | ||
| 331 | } | ||
| 332 | |||
| 333 | static inline void | ||
| 334 | emitter_json_object_end(emitter_t *emitter) { | ||
| 335 | if (emitter_outputs_json(emitter)) { | ||
| 336 | assert(emitter->nesting_depth > 0); | ||
| 337 | emitter_nest_dec(emitter); | ||
| 338 | if (emitter->output != emitter_output_json_compact) { | ||
| 339 | emitter_printf(emitter, "\n"); | ||
| 340 | emitter_indent(emitter); | ||
| 341 | } | ||
| 342 | emitter_printf(emitter, "}"); | ||
| 343 | } | ||
| 344 | } | ||
| 345 | |||
| 346 | |||
| 347 | /******************************************************************************/ | ||
| 348 | /* Table public API. */ | ||
| 349 | |||
| 350 | static inline void | ||
| 351 | emitter_table_dict_begin(emitter_t *emitter, const char *table_key) { | ||
| 352 | if (emitter->output == emitter_output_table) { | ||
| 353 | emitter_indent(emitter); | ||
| 354 | emitter_printf(emitter, "%s\n", table_key); | ||
| 355 | emitter_nest_inc(emitter); | ||
| 356 | } | ||
| 357 | } | ||
| 358 | |||
| 359 | static inline void | ||
| 360 | emitter_table_dict_end(emitter_t *emitter) { | ||
| 361 | if (emitter->output == emitter_output_table) { | ||
| 362 | emitter_nest_dec(emitter); | ||
| 363 | } | ||
| 364 | } | ||
| 365 | |||
| 366 | static inline void | ||
| 367 | emitter_table_kv_note(emitter_t *emitter, const char *table_key, | ||
| 368 | emitter_type_t value_type, const void *value, | ||
| 369 | const char *table_note_key, emitter_type_t table_note_value_type, | ||
| 370 | const void *table_note_value) { | ||
| 371 | if (emitter->output == emitter_output_table) { | ||
| 372 | emitter_indent(emitter); | ||
| 373 | emitter_printf(emitter, "%s: ", table_key); | ||
| 374 | emitter_print_value(emitter, emitter_justify_none, -1, | ||
| 375 | value_type, value); | ||
| 376 | if (table_note_key != NULL) { | ||
| 377 | emitter_printf(emitter, " (%s: ", table_note_key); | ||
| 378 | emitter_print_value(emitter, emitter_justify_none, -1, | ||
| 379 | table_note_value_type, table_note_value); | ||
| 380 | emitter_printf(emitter, ")"); | ||
| 381 | } | ||
| 382 | emitter_printf(emitter, "\n"); | ||
| 383 | } | ||
| 384 | emitter->item_at_depth = true; | ||
| 385 | } | ||
| 386 | |||
| 387 | static inline void | ||
| 388 | emitter_table_kv(emitter_t *emitter, const char *table_key, | ||
| 389 | emitter_type_t value_type, const void *value) { | ||
| 390 | emitter_table_kv_note(emitter, table_key, value_type, value, NULL, | ||
| 391 | emitter_type_bool, NULL); | ||
| 392 | } | ||
| 393 | |||
| 394 | |||
| 395 | /* Write to the emitter the given string, but only in table mode. */ | ||
| 396 | JEMALLOC_FORMAT_PRINTF(2, 3) | ||
| 397 | static inline void | ||
| 398 | emitter_table_printf(emitter_t *emitter, const char *format, ...) { | ||
| 399 | if (emitter->output == emitter_output_table) { | ||
| 400 | va_list ap; | ||
| 401 | va_start(ap, format); | ||
| 402 | malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap); | ||
| 403 | va_end(ap); | ||
| 404 | } | ||
| 405 | } | ||
| 406 | |||
| 407 | static inline void | ||
| 408 | emitter_table_row(emitter_t *emitter, emitter_row_t *row) { | ||
| 409 | if (emitter->output != emitter_output_table) { | ||
| 410 | return; | ||
| 411 | } | ||
| 412 | emitter_col_t *col; | ||
| 413 | ql_foreach(col, &row->cols, link) { | ||
| 414 | emitter_print_value(emitter, col->justify, col->width, | ||
| 415 | col->type, (const void *)&col->bool_val); | ||
| 416 | } | ||
| 417 | emitter_table_printf(emitter, "\n"); | ||
| 418 | } | ||
| 419 | |||
| 420 | static inline void | ||
| 421 | emitter_row_init(emitter_row_t *row) { | ||
| 422 | ql_new(&row->cols); | ||
| 423 | } | ||
| 424 | |||
| 425 | static inline void | ||
| 426 | emitter_col_init(emitter_col_t *col, emitter_row_t *row) { | ||
| 427 | ql_elm_new(col, link); | ||
| 428 | ql_tail_insert(&row->cols, col, link); | ||
| 429 | } | ||
| 430 | |||
| 431 | |||
| 432 | /******************************************************************************/ | ||
| 433 | /* | ||
| 434 | * Generalized public API. Emits using either JSON or table, according to | ||
| 435 | * settings in the emitter_t. */ | ||
| 436 | |||
| 437 | /* | ||
| 438 | * The "note" variant emits a second kv pair as well, but only in table mode. Omits the | ||
| 439 | * note if table_note_key is NULL. | ||
| 440 | */ | ||
| 441 | static inline void | ||
| 442 | emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key, | ||
| 443 | emitter_type_t value_type, const void *value, | ||
| 444 | const char *table_note_key, emitter_type_t table_note_value_type, | ||
| 445 | const void *table_note_value) { | ||
| 446 | if (emitter_outputs_json(emitter)) { | ||
| 447 | emitter_json_key(emitter, json_key); | ||
| 448 | emitter_json_value(emitter, value_type, value); | ||
| 449 | } else { | ||
| 450 | emitter_table_kv_note(emitter, table_key, value_type, value, | ||
| 451 | table_note_key, table_note_value_type, table_note_value); | ||
| 452 | } | ||
| 453 | emitter->item_at_depth = true; | ||
| 454 | } | ||
| 455 | |||
| 456 | static inline void | ||
| 457 | emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key, | ||
| 458 | emitter_type_t value_type, const void *value) { | ||
| 459 | emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL, | ||
| 460 | emitter_type_bool, NULL); | ||
| 461 | } | ||
| 462 | |||
| 463 | static inline void | ||
| 464 | emitter_dict_begin(emitter_t *emitter, const char *json_key, | ||
| 465 | const char *table_header) { | ||
| 466 | if (emitter_outputs_json(emitter)) { | ||
| 467 | emitter_json_key(emitter, json_key); | ||
| 468 | emitter_json_object_begin(emitter); | ||
| 469 | } else { | ||
| 470 | emitter_table_dict_begin(emitter, table_header); | ||
| 471 | } | ||
| 472 | } | ||
| 473 | |||
| 474 | static inline void | ||
| 475 | emitter_dict_end(emitter_t *emitter) { | ||
| 476 | if (emitter_outputs_json(emitter)) { | ||
| 477 | emitter_json_object_end(emitter); | ||
| 478 | } else { | ||
| 479 | emitter_table_dict_end(emitter); | ||
| 480 | } | ||
| 481 | } | ||
| 482 | |||
| 483 | static inline void | ||
| 484 | emitter_begin(emitter_t *emitter) { | ||
| 485 | if (emitter_outputs_json(emitter)) { | ||
| 486 | assert(emitter->nesting_depth == 0); | ||
| 487 | emitter_printf(emitter, "{"); | ||
| 488 | emitter_nest_inc(emitter); | ||
| 489 | } else { | ||
| 490 | /* | ||
| 491 | * This guarantees that we always call write_cb at least once. | ||
| 492 | * This is useful if some invariant is established by each call | ||
| 493 | * to write_cb, but doesn't hold initially: e.g., some buffer | ||
| 494 | * holds a null-terminated string. | ||
| 495 | */ | ||
| 496 | emitter_printf(emitter, "%s", ""); | ||
| 497 | } | ||
| 498 | } | ||
| 499 | |||
| 500 | static inline void | ||
| 501 | emitter_end(emitter_t *emitter) { | ||
| 502 | if (emitter_outputs_json(emitter)) { | ||
| 503 | assert(emitter->nesting_depth == 1); | ||
| 504 | emitter_nest_dec(emitter); | ||
| 505 | emitter_printf(emitter, "%s", emitter->output == | ||
| 506 | emitter_output_json_compact ? "}" : "\n}\n"); | ||
| 507 | } | ||
| 508 | } | ||
| 509 | |||
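To tie the pieces together, a minimal usage sketch. The callback and function names are hypothetical, the example assumes the jemalloc-internal environment where `write_cb_t` has the `void (void *, const char *)` shape, and `<stdio.h>` is pulled in only for the sketch:

```c
#include <stdio.h>

static void
stderr_write_cb(void *cbopaque, const char *s) {
	(void)cbopaque;
	fputs(s, stderr);
}

static void
emit_example(void) {
	emitter_t emitter;
	size_t allocated = 1024;

	emitter_init(&emitter, emitter_output_json, stderr_write_cb, NULL);
	emitter_begin(&emitter);	/* Prints "{". */
	emitter_json_kv(&emitter, "allocated", emitter_type_size, &allocated);
	emitter_end(&emitter);		/* Prints "\n}\n". */
	/* Output: {\n\t"allocated": 1024\n}\n */
}
```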
| 510 | #endif /* JEMALLOC_INTERNAL_EMITTER_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/eset.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/eset.h deleted file mode 100644 index 4f689b4..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/eset.h +++ /dev/null | |||
| @@ -1,77 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_ESET_H | ||
| 2 | #define JEMALLOC_INTERNAL_ESET_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/atomic.h" | ||
| 5 | #include "jemalloc/internal/fb.h" | ||
| 6 | #include "jemalloc/internal/edata.h" | ||
| 7 | #include "jemalloc/internal/mutex.h" | ||
| 8 | |||
| 9 | /* | ||
| 10 | * An eset ("extent set") is a quantized collection of extents, with a built-in | ||
| 11 | * LRU queue. | ||
| 12 | * | ||
| 13 | * This class is not thread-safe; synchronization must be done externally if | ||
| 14 | * there are mutating operations. One exception is the stats counters, which | ||
| 15 | * may be read without any locking. | ||
| 16 | */ | ||
| 17 | |||
| 18 | typedef struct eset_bin_s eset_bin_t; | ||
| 19 | struct eset_bin_s { | ||
| 20 | edata_heap_t heap; | ||
| 21 | /* | ||
| 22 | * We do first-fit across multiple size classes. If we compared against | ||
| 23 | * the min element in each heap directly, we'd take a cache miss per | ||
| 24 | * extent we looked at. If we co-locate the edata summaries, we only | ||
| 25 | * take a miss on the edata we're actually going to return (which is | ||
| 26 | * inevitable anyway). | ||
| 27 | */ | ||
| 28 | edata_cmp_summary_t heap_min; | ||
| 29 | }; | ||
| 30 | |||
| 31 | typedef struct eset_bin_stats_s eset_bin_stats_t; | ||
| 32 | struct eset_bin_stats_s { | ||
| 33 | atomic_zu_t nextents; | ||
| 34 | atomic_zu_t nbytes; | ||
| 35 | }; | ||
| 36 | |||
| 37 | typedef struct eset_s eset_t; | ||
| 38 | struct eset_s { | ||
| 39 | /* Bitmap for which set bits correspond to non-empty heaps. */ | ||
| 40 | fb_group_t bitmap[FB_NGROUPS(SC_NPSIZES + 1)]; | ||
| 41 | |||
| 42 | /* Quantized per size class heaps of extents. */ | ||
| 43 | eset_bin_t bins[SC_NPSIZES + 1]; | ||
| 44 | |||
| 45 | eset_bin_stats_t bin_stats[SC_NPSIZES + 1]; | ||
| 46 | |||
| 47 | /* LRU of all extents in heaps. */ | ||
| 48 | edata_list_inactive_t lru; | ||
| 49 | |||
| 50 | /* Page sum for all extents in heaps. */ | ||
| 51 | atomic_zu_t npages; | ||
| 52 | |||
| 53 | /* | ||
| 54 | * A duplication of the data in the containing ecache. We use this only | ||
| 55 | * for assertions on the states of the passed-in extents. | ||
| 56 | */ | ||
| 57 | extent_state_t state; | ||
| 58 | }; | ||
| 59 | |||
| 60 | void eset_init(eset_t *eset, extent_state_t state); | ||
| 61 | |||
| 62 | size_t eset_npages_get(eset_t *eset); | ||
| 63 | /* Get the number of extents in the given page size index. */ | ||
| 64 | size_t eset_nextents_get(eset_t *eset, pszind_t ind); | ||
| 65 | /* Get the sum total bytes of the extents in the given page size index. */ | ||
| 66 | size_t eset_nbytes_get(eset_t *eset, pszind_t ind); | ||
| 67 | |||
| 68 | void eset_insert(eset_t *eset, edata_t *edata); | ||
| 69 | void eset_remove(eset_t *eset, edata_t *edata); | ||
| 70 | /* | ||
| 71 | * Select an extent from this eset of the given size and alignment. Returns | ||
| 72 | * null if no such item could be found. | ||
| 73 | */ | ||
| 74 | edata_t *eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only, | ||
| 75 | unsigned lg_max_fit); | ||
| 76 | |||
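A brief sketch of the intended round trip, assuming the jemalloc-internal environment (the function name and the `lg_max_fit` value are illustrative only; callers must synchronize externally):

```c
static edata_t *
eset_reuse_sketch(eset_t *eset, edata_t *retired, size_t size) {
	eset_insert(eset, retired);	/* Cache a retired extent. */
	/* Later: look for a page-aligned extent of at least `size` bytes. */
	edata_t *reuse = eset_fit(eset, size, PAGE, /* exact_only */ false,
	    /* lg_max_fit */ 6);	/* Hypothetical fit limit. */
	if (reuse != NULL) {
		eset_remove(eset, reuse);
	}
	return reuse;
}
```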
| 77 | #endif /* JEMALLOC_INTERNAL_ESET_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/exp_grow.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/exp_grow.h deleted file mode 100644 index 8566b8a..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/exp_grow.h +++ /dev/null | |||
| @@ -1,50 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_EXP_GROW_H | ||
| 2 | #define JEMALLOC_INTERNAL_EXP_GROW_H | ||
| 3 | |||
| 4 | typedef struct exp_grow_s exp_grow_t; | ||
| 5 | struct exp_grow_s { | ||
| 6 | /* | ||
| 7 | * Next extent size class in a growing series to use when satisfying a | ||
| 8 | * request via the extent hooks (only if opt_retain). This limits the | ||
| 9 | * number of disjoint virtual memory ranges so that extent merging can | ||
| 10 | * be effective even if multiple arenas' extent allocation requests are | ||
| 11 | * highly interleaved. | ||
| 12 | * | ||
| 13 | * retain_grow_limit is the max allowed size ind to expand (unless the | ||
| 14 | * required size is greater). Default is no limit, and controlled | ||
| 15 | * through mallctl only. | ||
| 16 | */ | ||
| 17 | pszind_t next; | ||
| 18 | pszind_t limit; | ||
| 19 | }; | ||
| 20 | |||
| 21 | static inline bool | ||
| 22 | exp_grow_size_prepare(exp_grow_t *exp_grow, size_t alloc_size_min, | ||
| 23 | size_t *r_alloc_size, pszind_t *r_skip) { | ||
| 24 | *r_skip = 0; | ||
| 25 | *r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip); | ||
| 26 | while (*r_alloc_size < alloc_size_min) { | ||
| 27 | (*r_skip)++; | ||
| 28 | if (exp_grow->next + *r_skip >= | ||
| 29 | sz_psz2ind(SC_LARGE_MAXCLASS)) { | ||
| 30 | /* Outside legal range. */ | ||
| 31 | return true; | ||
| 32 | } | ||
| 33 | *r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip); | ||
| 34 | } | ||
| 35 | return false; | ||
| 36 | } | ||
| 37 | |||
| 38 | static inline void | ||
| 39 | exp_grow_size_commit(exp_grow_t *exp_grow, pszind_t skip) { | ||
| 40 | if (exp_grow->next + skip + 1 <= exp_grow->limit) { | ||
| 41 | exp_grow->next += skip + 1; | ||
| 42 | } else { | ||
| 43 | exp_grow->next = exp_grow->limit; | ||
| 44 | } | ||
| 45 | |||
| 46 | } | ||
| 47 | |||
| 48 | void exp_grow_init(exp_grow_t *exp_grow); | ||
| 49 | |||
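A schematic of the prepare/attempt/commit sequence these helpers support, assuming the jemalloc-internal environment (the function name is hypothetical and the mapping step is elided):

```c
static bool
grow_retained_sketch(exp_grow_t *exp_grow, size_t min_size) {
	size_t alloc_size;
	pszind_t skip;

	if (exp_grow_size_prepare(exp_grow, min_size, &alloc_size, &skip)) {
		return true;	/* min_size exceeds the largest size class. */
	}
	/* ... try to map alloc_size bytes via the extent hooks ... */
	exp_grow_size_commit(exp_grow, skip);	/* Advance only on success. */
	return false;
}
```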
| 50 | #endif /* JEMALLOC_INTERNAL_EXP_GROW_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/extent.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/extent.h deleted file mode 100644 index 1d51d41..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/extent.h +++ /dev/null | |||
| @@ -1,137 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_EXTENT_H | ||
| 2 | #define JEMALLOC_INTERNAL_EXTENT_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/ecache.h" | ||
| 5 | #include "jemalloc/internal/ehooks.h" | ||
| 6 | #include "jemalloc/internal/ph.h" | ||
| 7 | #include "jemalloc/internal/rtree.h" | ||
| 8 | |||
| 9 | /* | ||
| 10 | * This module contains the page-level allocator. It chooses the addresses that | ||
| 11 | * allocations requested by other modules will inhabit, and updates the global | ||
| 12 | * metadata to reflect allocation/deallocation/purging decisions. | ||
| 13 | */ | ||
| 14 | |||
| 15 | /* | ||
| 16 | * When reusing (and splitting) an active extent, (1U << opt_lg_extent_max_active_fit) | ||
| 17 | * is the maximum allowed ratio between the size of the active extent and the new extent. | ||
| 18 | */ | ||
| 19 | #define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6 | ||
| 20 | extern size_t opt_lg_extent_max_active_fit; | ||
| 21 | |||
| 22 | edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, | ||
| 23 | ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment, | ||
| 24 | bool zero, bool guarded); | ||
| 25 | edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, | ||
| 26 | ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment, | ||
| 27 | bool zero, bool guarded); | ||
| 28 | void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, | ||
| 29 | ecache_t *ecache, edata_t *edata); | ||
| 30 | edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, | ||
| 31 | ecache_t *ecache, size_t npages_min); | ||
| 32 | |||
| 33 | void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata); | ||
| 34 | void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, | ||
| 35 | edata_t *edata); | ||
| 36 | void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, | ||
| 37 | edata_t *edata); | ||
| 38 | edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, | ||
| 39 | void *new_addr, size_t size, size_t alignment, bool zero, bool *commit, | ||
| 40 | bool growing_retained); | ||
| 41 | void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, | ||
| 42 | edata_t *edata); | ||
| 43 | void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, | ||
| 44 | edata_t *edata); | ||
| 45 | bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, | ||
| 46 | size_t offset, size_t length); | ||
| 47 | bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, | ||
| 48 | size_t offset, size_t length); | ||
| 49 | bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, | ||
| 50 | size_t offset, size_t length); | ||
| 51 | bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, | ||
| 52 | size_t offset, size_t length); | ||
| 53 | edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, | ||
| 54 | ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b, | ||
| 55 | bool holding_core_locks); | ||
| 56 | bool extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, | ||
| 57 | edata_t *a, edata_t *b); | ||
| 58 | bool extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, | ||
| 59 | bool commit, bool zero, bool growing_retained); | ||
| 60 | size_t extent_sn_next(pac_t *pac); | ||
| 61 | bool extent_boot(void); | ||
| 62 | |||
| 63 | JEMALLOC_ALWAYS_INLINE bool | ||
| 64 | extent_neighbor_head_state_mergeable(bool edata_is_head, | ||
| 65 | bool neighbor_is_head, bool forward) { | ||
| 66 | /* | ||
| 67 | * Head state checking: disallow merging if the higher-address extent is a | ||
| 68 | * head extent. This helps preserve first-fit, and more importantly | ||
| 69 | * ensures that we never merge across arenas. | ||
| 70 | */ | ||
| 71 | if (forward) { | ||
| 72 | if (neighbor_is_head) { | ||
| 73 | return false; | ||
| 74 | } | ||
| 75 | } else { | ||
| 76 | if (edata_is_head) { | ||
| 77 | return false; | ||
| 78 | } | ||
| 79 | } | ||
| 80 | return true; | ||
| 81 | } | ||
| 82 | |||
| 83 | JEMALLOC_ALWAYS_INLINE bool | ||
| 84 | extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents, | ||
| 85 | extent_pai_t pai, extent_state_t expected_state, bool forward, | ||
| 86 | bool expanding) { | ||
| 87 | edata_t *neighbor = contents.edata; | ||
| 88 | if (neighbor == NULL) { | ||
| 89 | return false; | ||
| 90 | } | ||
| 91 | /* It's not safe to access *neighbor yet; must verify states first. */ | ||
| 92 | bool neighbor_is_head = contents.metadata.is_head; | ||
| 93 | if (!extent_neighbor_head_state_mergeable(edata_is_head_get(edata), | ||
| 94 | neighbor_is_head, forward)) { | ||
| 95 | return false; | ||
| 96 | } | ||
| 97 | extent_state_t neighbor_state = contents.metadata.state; | ||
| 98 | if (pai == EXTENT_PAI_PAC) { | ||
| 99 | if (neighbor_state != expected_state) { | ||
| 100 | return false; | ||
| 101 | } | ||
| 102 | /* From this point, it's safe to access *neighbor. */ | ||
| 103 | if (!expanding && (edata_committed_get(edata) != | ||
| 104 | edata_committed_get(neighbor))) { | ||
| 105 | /* | ||
| 106 | * Some platforms (e.g. Windows) require an explicit | ||
| 107 | * commit step (and writing to uncommitted memory is not | ||
| 108 | * allowed). | ||
| 109 | */ | ||
| 110 | return false; | ||
| 111 | } | ||
| 112 | } else { | ||
| 113 | if (neighbor_state == extent_state_active) { | ||
| 114 | return false; | ||
| 115 | } | ||
| 116 | /* From this point, it's safe to access *neighbor. */ | ||
| 117 | } | ||
| 118 | |||
| 119 | assert(edata_pai_get(edata) == pai); | ||
| 120 | if (edata_pai_get(neighbor) != pai) { | ||
| 121 | return false; | ||
| 122 | } | ||
| 123 | if (opt_retain) { | ||
| 124 | assert(edata_arena_ind_get(edata) == | ||
| 125 | edata_arena_ind_get(neighbor)); | ||
| 126 | } else { | ||
| 127 | if (edata_arena_ind_get(edata) != | ||
| 128 | edata_arena_ind_get(neighbor)) { | ||
| 129 | return false; | ||
| 130 | } | ||
| 131 | } | ||
| 132 | assert(!edata_guarded_get(edata) && !edata_guarded_get(neighbor)); | ||
| 133 | |||
| 134 | return true; | ||
| 135 | } | ||
| 136 | |||
| 137 | #endif /* JEMALLOC_INTERNAL_EXTENT_H */ | ||
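The head-state rule above is compact but easy to misread. The standalone snippet below just restates the predicate outside the header and checks the four combinations, to make the "never merge past a head extent on the higher-address side" behaviour explicit; it is only an illustration of that predicate, detached from the rtree lookups and locking that surround it in the real code.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Same logic as extent_neighbor_head_state_mergeable: when merging forward,
 * the neighbor sits at the higher address; when merging backward, we do.
 * The extent at the higher address must not be a head extent.
 */
static bool
mergeable(bool edata_is_head, bool neighbor_is_head, bool forward) {
	return forward ? !neighbor_is_head : !edata_is_head;
}

int
main(void) {
	/* Forward merge: blocked only when the (higher-address) neighbor is a head. */
	assert(mergeable(true, false, true));
	assert(!mergeable(true, true, true));
	/* Backward merge: blocked only when we ourselves are a head extent. */
	assert(mergeable(false, true, false));
	assert(!mergeable(true, true, false));
	printf("head-state merge rules hold\n");
	return 0;
}
```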
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/extent_dss.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/extent_dss.h deleted file mode 100644 index e8f02ce..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/extent_dss.h +++ /dev/null | |||
| @@ -1,26 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H | ||
| 2 | #define JEMALLOC_INTERNAL_EXTENT_DSS_H | ||
| 3 | |||
| 4 | typedef enum { | ||
| 5 | dss_prec_disabled = 0, | ||
| 6 | dss_prec_primary = 1, | ||
| 7 | dss_prec_secondary = 2, | ||
| 8 | |||
| 9 | dss_prec_limit = 3 | ||
| 10 | } dss_prec_t; | ||
| 11 | #define DSS_PREC_DEFAULT dss_prec_secondary | ||
| 12 | #define DSS_DEFAULT "secondary" | ||
| 13 | |||
| 14 | extern const char *dss_prec_names[]; | ||
| 15 | |||
| 16 | extern const char *opt_dss; | ||
| 17 | |||
| 18 | dss_prec_t extent_dss_prec_get(void); | ||
| 19 | bool extent_dss_prec_set(dss_prec_t dss_prec); | ||
| 20 | void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, | ||
| 21 | size_t size, size_t alignment, bool *zero, bool *commit); | ||
| 22 | bool extent_in_dss(void *addr); | ||
| 23 | bool extent_dss_mergeable(void *addr_a, void *addr_b); | ||
| 24 | void extent_dss_boot(void); | ||
| 25 | |||
| 26 | #endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/extent_mmap.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/extent_mmap.h deleted file mode 100644 index 55f17ee..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/extent_mmap.h +++ /dev/null | |||
| @@ -1,10 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H | ||
| 2 | #define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H | ||
| 3 | |||
| 4 | extern bool opt_retain; | ||
| 5 | |||
| 6 | void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, | ||
| 7 | bool *zero, bool *commit); | ||
| 8 | bool extent_dalloc_mmap(void *addr, size_t size); | ||
| 9 | |||
| 10 | #endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/fb.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/fb.h deleted file mode 100644 index 90c4091..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/fb.h +++ /dev/null | |||
| @@ -1,373 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_FB_H | ||
| 2 | #define JEMALLOC_INTERNAL_FB_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * The flat bitmap module. This has a larger API relative to the bitmap module | ||
| 6 | * (supporting things like backwards searches, and searching for both set and | ||
| 7 | * unset bits), at the cost of slower operations for very large bitmaps. | ||
| 8 | * | ||
| 9 | * Initialized flat bitmaps start at all-zeros (all bits unset). | ||
| 10 | */ | ||
| 11 | |||
| 12 | typedef unsigned long fb_group_t; | ||
| 13 | #define FB_GROUP_BITS (ZU(1) << (LG_SIZEOF_LONG + 3)) | ||
| 14 | #define FB_NGROUPS(nbits) ((nbits) / FB_GROUP_BITS \ | ||
| 15 | + ((nbits) % FB_GROUP_BITS == 0 ? 0 : 1)) | ||
| 16 | |||
| 17 | static inline void | ||
| 18 | fb_init(fb_group_t *fb, size_t nbits) { | ||
| 19 | size_t ngroups = FB_NGROUPS(nbits); | ||
| 20 | memset(fb, 0, ngroups * sizeof(fb_group_t)); | ||
| 21 | } | ||
| 22 | |||
| 23 | static inline bool | ||
| 24 | fb_empty(fb_group_t *fb, size_t nbits) { | ||
| 25 | size_t ngroups = FB_NGROUPS(nbits); | ||
| 26 | for (size_t i = 0; i < ngroups; i++) { | ||
| 27 | if (fb[i] != 0) { | ||
| 28 | return false; | ||
| 29 | } | ||
| 30 | } | ||
| 31 | return true; | ||
| 32 | } | ||
| 33 | |||
| 34 | static inline bool | ||
| 35 | fb_full(fb_group_t *fb, size_t nbits) { | ||
| 36 | size_t ngroups = FB_NGROUPS(nbits); | ||
| 37 | size_t trailing_bits = nbits % FB_GROUP_BITS; | ||
| 38 | size_t limit = (trailing_bits == 0 ? ngroups : ngroups - 1); | ||
| 39 | for (size_t i = 0; i < limit; i++) { | ||
| 40 | if (fb[i] != ~(fb_group_t)0) { | ||
| 41 | return false; | ||
| 42 | } | ||
| 43 | } | ||
| 44 | if (trailing_bits == 0) { | ||
| 45 | return true; | ||
| 46 | } | ||
| 47 | return fb[ngroups - 1] == ((fb_group_t)1 << trailing_bits) - 1; | ||
| 48 | } | ||
| 49 | |||
| 50 | static inline bool | ||
| 51 | fb_get(fb_group_t *fb, size_t nbits, size_t bit) { | ||
| 52 | assert(bit < nbits); | ||
| 53 | size_t group_ind = bit / FB_GROUP_BITS; | ||
| 54 | size_t bit_ind = bit % FB_GROUP_BITS; | ||
| 55 | return (bool)(fb[group_ind] & ((fb_group_t)1 << bit_ind)); | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline void | ||
| 59 | fb_set(fb_group_t *fb, size_t nbits, size_t bit) { | ||
| 60 | assert(bit < nbits); | ||
| 61 | size_t group_ind = bit / FB_GROUP_BITS; | ||
| 62 | size_t bit_ind = bit % FB_GROUP_BITS; | ||
| 63 | fb[group_ind] |= ((fb_group_t)1 << bit_ind); | ||
| 64 | } | ||
| 65 | |||
| 66 | static inline void | ||
| 67 | fb_unset(fb_group_t *fb, size_t nbits, size_t bit) { | ||
| 68 | assert(bit < nbits); | ||
| 69 | size_t group_ind = bit / FB_GROUP_BITS; | ||
| 70 | size_t bit_ind = bit % FB_GROUP_BITS; | ||
| 71 | fb[group_ind] &= ~((fb_group_t)1 << bit_ind); | ||
| 72 | } | ||
| 73 | |||
| 74 | |||
| 75 | /* | ||
| 76 | * Some implementation details. This visitation function lets us apply a group | ||
| 77 | * visitor to each group in the bitmap (potentially modifying it). The mask | ||
| 78 | * indicates which bits are logically part of the visitation. | ||
| 79 | */ | ||
| 80 | typedef void (*fb_group_visitor_t)(void *ctx, fb_group_t *fb, fb_group_t mask); | ||
| 81 | JEMALLOC_ALWAYS_INLINE void | ||
| 82 | fb_visit_impl(fb_group_t *fb, size_t nbits, fb_group_visitor_t visit, void *ctx, | ||
| 83 | size_t start, size_t cnt) { | ||
| 84 | assert(cnt > 0); | ||
| 85 | assert(start + cnt <= nbits); | ||
| 86 | size_t group_ind = start / FB_GROUP_BITS; | ||
| 87 | size_t start_bit_ind = start % FB_GROUP_BITS; | ||
| 88 | /* | ||
| 89 | * The first group is special; it's the only one we don't start writing | ||
| 90 | * to from bit 0. | ||
| 91 | */ | ||
| 92 | size_t first_group_cnt = (start_bit_ind + cnt > FB_GROUP_BITS | ||
| 93 | ? FB_GROUP_BITS - start_bit_ind : cnt); | ||
| 94 | /* | ||
| 95 | * We can basically split affected words into: | ||
| 96 | * - The first group, where we touch only the high bits | ||
| 97 | * - The last group, where we touch only the low bits | ||
| 98 | * - The middle, where we set all the bits to the same thing. | ||
| 99 | * We treat each case individually. The last two could be merged, but | ||
| 100 | * this can lead to bad codegen for those middle words. | ||
| 101 | */ | ||
| 102 | /* First group */ | ||
| 103 | fb_group_t mask = ((~(fb_group_t)0) | ||
| 104 | >> (FB_GROUP_BITS - first_group_cnt)) | ||
| 105 | << start_bit_ind; | ||
| 106 | visit(ctx, &fb[group_ind], mask); | ||
| 107 | |||
| 108 | cnt -= first_group_cnt; | ||
| 109 | group_ind++; | ||
| 110 | /* Middle groups */ | ||
| 111 | while (cnt > FB_GROUP_BITS) { | ||
| 112 | visit(ctx, &fb[group_ind], ~(fb_group_t)0); | ||
| 113 | cnt -= FB_GROUP_BITS; | ||
| 114 | group_ind++; | ||
| 115 | } | ||
| 116 | /* Last group */ | ||
| 117 | if (cnt != 0) { | ||
| 118 | mask = (~(fb_group_t)0) >> (FB_GROUP_BITS - cnt); | ||
| 119 | visit(ctx, &fb[group_ind], mask); | ||
| 120 | } | ||
| 121 | } | ||
| 122 | |||
| 123 | JEMALLOC_ALWAYS_INLINE void | ||
| 124 | fb_assign_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) { | ||
| 125 | bool val = *(bool *)ctx; | ||
| 126 | if (val) { | ||
| 127 | *fb |= mask; | ||
| 128 | } else { | ||
| 129 | *fb &= ~mask; | ||
| 130 | } | ||
| 131 | } | ||
| 132 | |||
| 133 | /* Sets the cnt bits starting at position start. Must not have a 0 count. */ | ||
| 134 | static inline void | ||
| 135 | fb_set_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) { | ||
| 136 | bool val = true; | ||
| 137 | fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt); | ||
| 138 | } | ||
| 139 | |||
| 140 | /* Unsets the cnt bits starting at position start. Must not have a 0 count. */ | ||
| 141 | static inline void | ||
| 142 | fb_unset_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) { | ||
| 143 | bool val = false; | ||
| 144 | fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt); | ||
| 145 | } | ||
| 146 | |||
| 147 | JEMALLOC_ALWAYS_INLINE void | ||
| 148 | fb_scount_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) { | ||
| 149 | size_t *scount = (size_t *)ctx; | ||
| 150 | *scount += popcount_lu(*fb & mask); | ||
| 151 | } | ||
| 152 | |||
| 153 | /* Finds the number of set bits in the range of length cnt starting at start. */ | ||
| 154 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 155 | fb_scount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) { | ||
| 156 | size_t scount = 0; | ||
| 157 | fb_visit_impl(fb, nbits, &fb_scount_visitor, &scount, start, cnt); | ||
| 158 | return scount; | ||
| 159 | } | ||
| 160 | |||
| 161 | /* Finds the number of unset bits in the range of length cnt starting at start. */ | ||
| 162 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 163 | fb_ucount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) { | ||
| 164 | size_t scount = fb_scount(fb, nbits, start, cnt); | ||
| 165 | return cnt - scount; | ||
| 166 | } | ||
| 167 | |||
| 168 | /* | ||
| 169 | * An implementation detail; find the first bit at position >= start (or <= start, | ||
| 170 | * when searching backward) with the value val. | ||
| 171 | * | ||
| 172 | * Returns the number of bits in the bitmap (forward) or -1 (backward) if no such bit exists. | ||
| 173 | */ | ||
| 174 | JEMALLOC_ALWAYS_INLINE ssize_t | ||
| 175 | fb_find_impl(fb_group_t *fb, size_t nbits, size_t start, bool val, | ||
| 176 | bool forward) { | ||
| 177 | assert(start < nbits); | ||
| 178 | size_t ngroups = FB_NGROUPS(nbits); | ||
| 179 | ssize_t group_ind = start / FB_GROUP_BITS; | ||
| 180 | size_t bit_ind = start % FB_GROUP_BITS; | ||
| 181 | |||
| 182 | fb_group_t maybe_invert = (val ? 0 : (fb_group_t)-1); | ||
| 183 | |||
| 184 | fb_group_t group = fb[group_ind]; | ||
| 185 | group ^= maybe_invert; | ||
| 186 | if (forward) { | ||
| 187 | /* Only keep ones in bits bit_ind and above. */ | ||
| 188 | group &= ~((1LU << bit_ind) - 1); | ||
| 189 | } else { | ||
| 190 | /* | ||
| 191 | * Only keep ones in bits bit_ind and below. You might more | ||
| 192 | * naturally express this as (1 << (bit_ind + 1)) - 1, but | ||
| 193 | * that shifts by an invalid amount if bit_ind is one less than | ||
| 194 | * FB_GROUP_BITS. | ||
| 195 | */ | ||
| 196 | group &= ((2LU << bit_ind) - 1); | ||
| 197 | } | ||
| 198 | ssize_t group_ind_bound = forward ? (ssize_t)ngroups : -1; | ||
| 199 | while (group == 0) { | ||
| 200 | group_ind += forward ? 1 : -1; | ||
| 201 | if (group_ind == group_ind_bound) { | ||
| 202 | return forward ? (ssize_t)nbits : (ssize_t)-1; | ||
| 203 | } | ||
| 204 | group = fb[group_ind]; | ||
| 205 | group ^= maybe_invert; | ||
| 206 | } | ||
| 207 | assert(group != 0); | ||
| 208 | size_t bit = forward ? ffs_lu(group) : fls_lu(group); | ||
| 209 | size_t pos = group_ind * FB_GROUP_BITS + bit; | ||
| 210 | /* | ||
| 211 | * The high bits of a partially filled last group are zeros, so if we're | ||
| 212 | * looking for zeros we don't want to report an invalid result. | ||
| 213 | */ | ||
| 214 | if (forward && !val && pos > nbits) { | ||
| 215 | return nbits; | ||
| 216 | } | ||
| 217 | return pos; | ||
| 218 | } | ||
| 219 | |||
| 220 | /* | ||
| 221 | * Find the first unset bit in the bitmap with an index >= min_bit. Returns the | ||
| 222 | * number of bits in the bitmap if no such bit exists. | ||
| 223 | */ | ||
| 224 | static inline size_t | ||
| 225 | fb_ffu(fb_group_t *fb, size_t nbits, size_t min_bit) { | ||
| 226 | return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ false, | ||
| 227 | /* forward */ true); | ||
| 228 | } | ||
| 229 | |||
| 230 | /* The same, but looks for a set bit. */ | ||
| 231 | static inline size_t | ||
| 232 | fb_ffs(fb_group_t *fb, size_t nbits, size_t min_bit) { | ||
| 233 | return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ true, | ||
| 234 | /* forward */ true); | ||
| 235 | } | ||
| 236 | |||
| 237 | /* | ||
| 238 | * Find the last unset bit in the bitmap with an index <= max_bit. Returns -1 if | ||
| 239 | * no such bit exists. | ||
| 240 | */ | ||
| 241 | static inline ssize_t | ||
| 242 | fb_flu(fb_group_t *fb, size_t nbits, size_t max_bit) { | ||
| 243 | return fb_find_impl(fb, nbits, max_bit, /* val */ false, | ||
| 244 | /* forward */ false); | ||
| 245 | } | ||
| 246 | |||
| 247 | static inline ssize_t | ||
| 248 | fb_fls(fb_group_t *fb, size_t nbits, size_t max_bit) { | ||
| 249 | return fb_find_impl(fb, nbits, max_bit, /* val */ true, | ||
| 250 | /* forward */ false); | ||
| 251 | } | ||
| 252 | |||
| 253 | /* Returns whether or not we found a range. */ | ||
| 254 | JEMALLOC_ALWAYS_INLINE bool | ||
| 255 | fb_iter_range_impl(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin, | ||
| 256 | size_t *r_len, bool val, bool forward) { | ||
| 257 | assert(start < nbits); | ||
| 258 | ssize_t next_range_begin = fb_find_impl(fb, nbits, start, val, forward); | ||
| 259 | if ((forward && next_range_begin == (ssize_t)nbits) | ||
| 260 | || (!forward && next_range_begin == (ssize_t)-1)) { | ||
| 261 | return false; | ||
| 262 | } | ||
| 263 | /* Half open range; the set bits are [begin, end). */ | ||
| 264 | ssize_t next_range_end = fb_find_impl(fb, nbits, next_range_begin, !val, | ||
| 265 | forward); | ||
| 266 | if (forward) { | ||
| 267 | *r_begin = next_range_begin; | ||
| 268 | *r_len = next_range_end - next_range_begin; | ||
| 269 | } else { | ||
| 270 | *r_begin = next_range_end + 1; | ||
| 271 | *r_len = next_range_begin - next_range_end; | ||
| 272 | } | ||
| 273 | return true; | ||
| 274 | } | ||
| 275 | |||
| 276 | /* | ||
| 277 | * Used to iterate through ranges of set bits. | ||
| 278 | * | ||
| 279 | * Tries to find the next contiguous sequence of set bits with a first index >= | ||
| 280 | * start. If one exists, puts the earliest bit of the range in *r_begin, its | ||
| 281 | * length in *r_len, and returns true. Otherwise, returns false (without | ||
| 282 | * touching *r_begin or *r_len). | ||
| 283 | */ | ||
| 284 | static inline bool | ||
| 285 | fb_srange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin, | ||
| 286 | size_t *r_len) { | ||
| 287 | return fb_iter_range_impl(fb, nbits, start, r_begin, r_len, | ||
| 288 | /* val */ true, /* forward */ true); | ||
| 289 | } | ||
| 290 | |||
| 291 | /* | ||
| 292 | * The same as fb_srange_iter, but searches backwards from start rather than | ||
| 293 | * forwards. (The position returned is still the earliest bit in the range). | ||
| 294 | */ | ||
| 295 | static inline bool | ||
| 296 | fb_srange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin, | ||
| 297 | size_t *r_len) { | ||
| 298 | return fb_iter_range_impl(fb, nbits, start, r_begin, r_len, | ||
| 299 | /* val */ true, /* forward */ false); | ||
| 300 | } | ||
| 301 | |||
| 302 | /* Similar to fb_srange_iter, but searches for unset bits. */ | ||
| 303 | static inline bool | ||
| 304 | fb_urange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin, | ||
| 305 | size_t *r_len) { | ||
| 306 | return fb_iter_range_impl(fb, nbits, start, r_begin, r_len, | ||
| 307 | /* val */ false, /* forward */ true); | ||
| 308 | } | ||
| 309 | |||
| 310 | /* Similar to fb_srange_riter, but searches for unset bits. */ | ||
| 311 | static inline bool | ||
| 312 | fb_urange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin, | ||
| 313 | size_t *r_len) { | ||
| 314 | return fb_iter_range_impl(fb, nbits, start, r_begin, r_len, | ||
| 315 | /* val */ false, /* forward */ false); | ||
| 316 | } | ||
| 317 | |||
| 318 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 319 | fb_range_longest_impl(fb_group_t *fb, size_t nbits, bool val) { | ||
| 320 | size_t begin = 0; | ||
| 321 | size_t longest_len = 0; | ||
| 322 | size_t len = 0; | ||
| 323 | while (begin < nbits && fb_iter_range_impl(fb, nbits, begin, &begin, | ||
| 324 | &len, val, /* forward */ true)) { | ||
| 325 | if (len > longest_len) { | ||
| 326 | longest_len = len; | ||
| 327 | } | ||
| 328 | begin += len; | ||
| 329 | } | ||
| 330 | return longest_len; | ||
| 331 | } | ||
| 332 | |||
| 333 | static inline size_t | ||
| 334 | fb_srange_longest(fb_group_t *fb, size_t nbits) { | ||
| 335 | return fb_range_longest_impl(fb, nbits, /* val */ true); | ||
| 336 | } | ||
| 337 | |||
| 338 | static inline size_t | ||
| 339 | fb_urange_longest(fb_group_t *fb, size_t nbits) { | ||
| 340 | return fb_range_longest_impl(fb, nbits, /* val */ false); | ||
| 341 | } | ||
| 342 | |||
| 343 | /* | ||
| 344 | * Initializes each bit of dst with the bitwise-AND of the corresponding bits of | ||
| 345 | * src1 and src2. All bitmaps must be the same size. | ||
| 346 | */ | ||
| 347 | static inline void | ||
| 348 | fb_bit_and(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) { | ||
| 349 | size_t ngroups = FB_NGROUPS(nbits); | ||
| 350 | for (size_t i = 0; i < ngroups; i++) { | ||
| 351 | dst[i] = src1[i] & src2[i]; | ||
| 352 | } | ||
| 353 | } | ||
| 354 | |||
| 355 | /* Like fb_bit_and, but with bitwise-OR. */ | ||
| 356 | static inline void | ||
| 357 | fb_bit_or(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) { | ||
| 358 | size_t ngroups = FB_NGROUPS(nbits); | ||
| 359 | for (size_t i = 0; i < ngroups; i++) { | ||
| 360 | dst[i] = src1[i] | src2[i]; | ||
| 361 | } | ||
| 362 | } | ||
| 363 | |||
| 364 | /* Initializes dst bit i to the negation of source bit i. */ | ||
| 365 | static inline void | ||
| 366 | fb_bit_not(fb_group_t *dst, fb_group_t *src, size_t nbits) { | ||
| 367 | size_t ngroups = FB_NGROUPS(nbits); | ||
| 368 | for (size_t i = 0; i < ngroups; i++) { | ||
| 369 | dst[i] = ~src[i]; | ||
| 370 | } | ||
| 371 | } | ||
| 372 | |||
| 373 | #endif /* JEMALLOC_INTERNAL_FB_H */ | ||
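To make the group layout above concrete, here is a deliberately stripped-down, self-contained sketch in the same spirit as fb.h: bit i lives in group i / GROUP_BITS of an unsigned long array, all bits start unset, and a range count walks the bits one at a time where the real fb_scount masks whole groups and uses popcount. The helper names shadow the real ones only for readability; this is not the header's implementation.

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define GROUP_BITS (sizeof(unsigned long) * 8)
#define NGROUPS(nbits) (((nbits) + GROUP_BITS - 1) / GROUP_BITS)

static void fb_set(unsigned long *fb, size_t bit) {
	fb[bit / GROUP_BITS] |= 1UL << (bit % GROUP_BITS);
}

static bool fb_get(const unsigned long *fb, size_t bit) {
	return (fb[bit / GROUP_BITS] >> (bit % GROUP_BITS)) & 1UL;
}

/* Count set bits in [start, start + cnt); naive loop instead of masked popcount. */
static size_t fb_scount(const unsigned long *fb, size_t start, size_t cnt) {
	size_t n = 0;
	for (size_t i = start; i < start + cnt; i++) {
		n += fb_get(fb, i);
	}
	return n;
}

int
main(void) {
	enum { NBITS = 200 };
	unsigned long fb[NGROUPS(NBITS)];
	memset(fb, 0, sizeof(fb)); /* like fb_init: all bits start unset */

	for (size_t i = 60; i < 76; i++) { /* a 16-bit run spanning a group boundary */
		fb_set(fb, i);
	}
	assert(fb_get(fb, 70));
	assert(!fb_get(fb, 90));
	printf("set bits in [0, 200): %zu\n", fb_scount(fb, 0, NBITS)); /* 16 */
	return 0;
}
```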
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/fxp.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/fxp.h deleted file mode 100644 index 415a982..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/fxp.h +++ /dev/null | |||
| @@ -1,126 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_FXP_H | ||
| 2 | #define JEMALLOC_INTERNAL_FXP_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * A simple fixed-point math implementation, supporting only unsigned values | ||
| 6 | * (with overflow being an error). | ||
| 7 | * | ||
| 8 | * It's not in general safe to use floating point in core code, because the | ||
| 9 | * various libc implementations we get linked against may assume that malloc | ||
| 10 | * won't touch floating-point state, and may call it with an unusual calling convention. | ||
| 11 | */ | ||
| 12 | |||
| 13 | /* | ||
| 14 | * High 16 bits are the integer part, low 16 are the fractional part. Or | ||
| 15 | * equivalently, repr == 2**16 * val, where we use "val" to refer to the | ||
| 16 | * (imaginary) fractional representation of the true value. | ||
| 17 | * | ||
| 18 | * We pick a uint32_t here since it's convenient in some places to | ||
| 19 | * double the representation size (i.e. multiplication and division use | ||
| 20 | * 64-bit integer types), and a uint64_t is the largest type we're | ||
| 21 | * certain is available. | ||
| 22 | */ | ||
| 23 | typedef uint32_t fxp_t; | ||
| 24 | #define FXP_INIT_INT(x) ((x) << 16) | ||
| 25 | #define FXP_INIT_PERCENT(pct) (((pct) << 16) / 100) | ||
| 26 | |||
| 27 | /* | ||
| 28 | * Amount of precision used in parsing and printing numbers. The integer bound | ||
| 29 | * is simply because the integer part of the number gets 16 bits, and so is | ||
| 30 | * bounded by 65536. | ||
| 31 | * | ||
| 32 | * We use a lot of precision for the fractional part, even though most of it | ||
| 33 | * gets rounded off; this lets us get exact values for the important special | ||
| 34 | * case where the denominator is a small power of 2 (for instance, | ||
| 35 | * 1/512 == 0.001953125 is exactly representable even with only 16 bits of | ||
| 36 | * fractional precision). We need to left-shift by 16 before dividing by | ||
| 37 | * 10**precision, so we pick precision to be floor(log10(2**48)) = 14. | ||
| 38 | */ | ||
| 39 | #define FXP_INTEGER_PART_DIGITS 5 | ||
| 40 | #define FXP_FRACTIONAL_PART_DIGITS 14 | ||
| 41 | |||
| 42 | /* | ||
| 43 | * In addition to the integer and fractional parts of the number, we need to | ||
| 44 | * include a null character and (possibly) a decimal point. | ||
| 45 | */ | ||
| 46 | #define FXP_BUF_SIZE (FXP_INTEGER_PART_DIGITS + FXP_FRACTIONAL_PART_DIGITS + 2) | ||
| 47 | |||
| 48 | static inline fxp_t | ||
| 49 | fxp_add(fxp_t a, fxp_t b) { | ||
| 50 | return a + b; | ||
| 51 | } | ||
| 52 | |||
| 53 | static inline fxp_t | ||
| 54 | fxp_sub(fxp_t a, fxp_t b) { | ||
| 55 | assert(a >= b); | ||
| 56 | return a - b; | ||
| 57 | } | ||
| 58 | |||
| 59 | static inline fxp_t | ||
| 60 | fxp_mul(fxp_t a, fxp_t b) { | ||
| 61 | uint64_t unshifted = (uint64_t)a * (uint64_t)b; | ||
| 62 | /* | ||
| 63 | * Unshifted is (a.val * 2**16) * (b.val * 2**16) | ||
| 64 | * == (a.val * b.val) * 2**32, but we want | ||
| 65 | * (a.val * b.val) * 2 ** 16. | ||
| 66 | */ | ||
| 67 | return (uint32_t)(unshifted >> 16); | ||
| 68 | } | ||
| 69 | |||
| 70 | static inline fxp_t | ||
| 71 | fxp_div(fxp_t a, fxp_t b) { | ||
| 72 | assert(b != 0); | ||
| 73 | uint64_t unshifted = ((uint64_t)a << 32) / (uint64_t)b; | ||
| 74 | /* | ||
| 75 | * Unshifted is (a.val * 2**16) * (2**32) / (b.val * 2**16) | ||
| 76 | * == (a.val / b.val) * (2 ** 32), which again corresponds to a right | ||
| 77 | * shift of 16. | ||
| 78 | */ | ||
| 79 | return (uint32_t)(unshifted >> 16); | ||
| 80 | } | ||
| 81 | |||
| 82 | static inline uint32_t | ||
| 83 | fxp_round_down(fxp_t a) { | ||
| 84 | return a >> 16; | ||
| 85 | } | ||
| 86 | |||
| 87 | static inline uint32_t | ||
| 88 | fxp_round_nearest(fxp_t a) { | ||
| 89 | uint32_t fractional_part = (a & ((1U << 16) - 1)); | ||
| 90 | uint32_t increment = (uint32_t)(fractional_part >= (1U << 15)); | ||
| 91 | return (a >> 16) + increment; | ||
| 92 | } | ||
| 93 | |||
| 94 | /* | ||
| 95 | * Approximately computes x * frac, without the size limitations that would be | ||
| 96 | * imposed by converting x to an fxp_t. | ||
| 97 | */ | ||
| 98 | static inline size_t | ||
| 99 | fxp_mul_frac(size_t x_orig, fxp_t frac) { | ||
| 100 | assert(frac <= (1U << 16)); | ||
| 101 | /* | ||
| 102 | * Work around an over-enthusiastic warning about type limits below (on | ||
| 103 | * 32-bit platforms, a size_t is always less than 1ULL << 48). | ||
| 104 | */ | ||
| 105 | uint64_t x = (uint64_t)x_orig; | ||
| 106 | /* | ||
| 107 | * If we can guarantee no overflow, multiply first before shifting, to | ||
| 108 | * preserve some precision. Otherwise, shift first and then multiply. | ||
| 109 | * In the latter case, we only lose the low 16 bits of a 48-bit number, | ||
| 110 | * so we're still accurate to within 1/2**32. | ||
| 111 | */ | ||
| 112 | if (x < (1ULL << 48)) { | ||
| 113 | return (size_t)((x * frac) >> 16); | ||
| 114 | } else { | ||
| 115 | return (size_t)((x >> 16) * (uint64_t)frac); | ||
| 116 | } | ||
| 117 | } | ||
| 118 | |||
| 119 | /* | ||
| 120 | * Returns true on error. Otherwise, returns false and updates *end to point to | ||
| 121 | * the first character not parsed (because it wasn't a digit). | ||
| 122 | */ | ||
| 123 | bool fxp_parse(fxp_t *a, const char *ptr, char **end); | ||
| 124 | void fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]); | ||
| 125 | |||
| 126 | #endif /* JEMALLOC_INTERNAL_FXP_H */ | ||
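The 16.16 representation described above reduces multiplication and division to a single shift correction. Below is a self-contained check of that arithmetic; it reimplements the three small operations rather than including the header, so treat it as an illustration of the representation, not as the library's code.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* 16.16 unsigned fixed point: repr == val * 2^16. */
typedef uint32_t fxp_t;
#define FXP_INIT_INT(x)     ((fxp_t)(x) << 16)
#define FXP_INIT_PERCENT(p) ((fxp_t)(((p) << 16) / 100))

static fxp_t fxp_mul(fxp_t a, fxp_t b) {
	/* (a*2^16)*(b*2^16) == (a*b)*2^32; shift right by 16 to get (a*b)*2^16. */
	return (fxp_t)(((uint64_t)a * b) >> 16);
}

static fxp_t fxp_div(fxp_t a, fxp_t b) {
	/* Pre-shift the dividend so the 2^16 scale survives the division. */
	return (fxp_t)((((uint64_t)a << 32) / b) >> 16);
}

static uint32_t fxp_round_nearest(fxp_t a) {
	return (a >> 16) + ((a & 0xffff) >= 0x8000);
}

int
main(void) {
	fxp_t quarter = FXP_INIT_PERCENT(25); /* 0.25, exactly representable */
	fxp_t eight = FXP_INIT_INT(8);
	assert(fxp_mul(quarter, eight) == FXP_INIT_INT(2)); /* 0.25 * 8 == 2 */
	/* 1/512 is exact with 16 fractional bits, as the header's comment notes. */
	assert(fxp_div(FXP_INIT_INT(1), FXP_INIT_INT(512)) == (fxp_t)(1u << 16) / 512);
	printf("round(2.5) = %u\n", (unsigned)fxp_round_nearest(FXP_INIT_INT(2) + 0x8000)); /* 3 */
	return 0;
}
```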
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hash.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hash.h deleted file mode 100644 index 7f94567..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hash.h +++ /dev/null | |||
| @@ -1,320 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_HASH_H | ||
| 2 | #define JEMALLOC_INTERNAL_HASH_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/assert.h" | ||
| 5 | |||
| 6 | /* | ||
| 7 | * The following hash function is based on MurmurHash3, placed into the public | ||
| 8 | * domain by Austin Appleby. See https://github.com/aappleby/smhasher for | ||
| 9 | * details. | ||
| 10 | */ | ||
| 11 | |||
| 12 | /******************************************************************************/ | ||
| 13 | /* Internal implementation. */ | ||
| 14 | static inline uint32_t | ||
| 15 | hash_rotl_32(uint32_t x, int8_t r) { | ||
| 16 | return ((x << r) | (x >> (32 - r))); | ||
| 17 | } | ||
| 18 | |||
| 19 | static inline uint64_t | ||
| 20 | hash_rotl_64(uint64_t x, int8_t r) { | ||
| 21 | return ((x << r) | (x >> (64 - r))); | ||
| 22 | } | ||
| 23 | |||
| 24 | static inline uint32_t | ||
| 25 | hash_get_block_32(const uint32_t *p, int i) { | ||
| 26 | /* Handle unaligned read. */ | ||
| 27 | if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { | ||
| 28 | uint32_t ret; | ||
| 29 | |||
| 30 | memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); | ||
| 31 | return ret; | ||
| 32 | } | ||
| 33 | |||
| 34 | return p[i]; | ||
| 35 | } | ||
| 36 | |||
| 37 | static inline uint64_t | ||
| 38 | hash_get_block_64(const uint64_t *p, int i) { | ||
| 39 | /* Handle unaligned read. */ | ||
| 40 | if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { | ||
| 41 | uint64_t ret; | ||
| 42 | |||
| 43 | memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); | ||
| 44 | return ret; | ||
| 45 | } | ||
| 46 | |||
| 47 | return p[i]; | ||
| 48 | } | ||
| 49 | |||
| 50 | static inline uint32_t | ||
| 51 | hash_fmix_32(uint32_t h) { | ||
| 52 | h ^= h >> 16; | ||
| 53 | h *= 0x85ebca6b; | ||
| 54 | h ^= h >> 13; | ||
| 55 | h *= 0xc2b2ae35; | ||
| 56 | h ^= h >> 16; | ||
| 57 | |||
| 58 | return h; | ||
| 59 | } | ||
| 60 | |||
| 61 | static inline uint64_t | ||
| 62 | hash_fmix_64(uint64_t k) { | ||
| 63 | k ^= k >> 33; | ||
| 64 | k *= KQU(0xff51afd7ed558ccd); | ||
| 65 | k ^= k >> 33; | ||
| 66 | k *= KQU(0xc4ceb9fe1a85ec53); | ||
| 67 | k ^= k >> 33; | ||
| 68 | |||
| 69 | return k; | ||
| 70 | } | ||
| 71 | |||
| 72 | static inline uint32_t | ||
| 73 | hash_x86_32(const void *key, int len, uint32_t seed) { | ||
| 74 | const uint8_t *data = (const uint8_t *) key; | ||
| 75 | const int nblocks = len / 4; | ||
| 76 | |||
| 77 | uint32_t h1 = seed; | ||
| 78 | |||
| 79 | const uint32_t c1 = 0xcc9e2d51; | ||
| 80 | const uint32_t c2 = 0x1b873593; | ||
| 81 | |||
| 82 | /* body */ | ||
| 83 | { | ||
| 84 | const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); | ||
| 85 | int i; | ||
| 86 | |||
| 87 | for (i = -nblocks; i; i++) { | ||
| 88 | uint32_t k1 = hash_get_block_32(blocks, i); | ||
| 89 | |||
| 90 | k1 *= c1; | ||
| 91 | k1 = hash_rotl_32(k1, 15); | ||
| 92 | k1 *= c2; | ||
| 93 | |||
| 94 | h1 ^= k1; | ||
| 95 | h1 = hash_rotl_32(h1, 13); | ||
| 96 | h1 = h1*5 + 0xe6546b64; | ||
| 97 | } | ||
| 98 | } | ||
| 99 | |||
| 100 | /* tail */ | ||
| 101 | { | ||
| 102 | const uint8_t *tail = (const uint8_t *) (data + nblocks*4); | ||
| 103 | |||
| 104 | uint32_t k1 = 0; | ||
| 105 | |||
| 106 | switch (len & 3) { | ||
| 107 | case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH; | ||
| 108 | case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH; | ||
| 109 | case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); | ||
| 110 | k1 *= c2; h1 ^= k1; | ||
| 111 | } | ||
| 112 | } | ||
| 113 | |||
| 114 | /* finalization */ | ||
| 115 | h1 ^= len; | ||
| 116 | |||
| 117 | h1 = hash_fmix_32(h1); | ||
| 118 | |||
| 119 | return h1; | ||
| 120 | } | ||
| 121 | |||
| 122 | static inline void | ||
| 123 | hash_x86_128(const void *key, const int len, uint32_t seed, | ||
| 124 | uint64_t r_out[2]) { | ||
| 125 | const uint8_t * data = (const uint8_t *) key; | ||
| 126 | const int nblocks = len / 16; | ||
| 127 | |||
| 128 | uint32_t h1 = seed; | ||
| 129 | uint32_t h2 = seed; | ||
| 130 | uint32_t h3 = seed; | ||
| 131 | uint32_t h4 = seed; | ||
| 132 | |||
| 133 | const uint32_t c1 = 0x239b961b; | ||
| 134 | const uint32_t c2 = 0xab0e9789; | ||
| 135 | const uint32_t c3 = 0x38b34ae5; | ||
| 136 | const uint32_t c4 = 0xa1e38b93; | ||
| 137 | |||
| 138 | /* body */ | ||
| 139 | { | ||
| 140 | const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); | ||
| 141 | int i; | ||
| 142 | |||
| 143 | for (i = -nblocks; i; i++) { | ||
| 144 | uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); | ||
| 145 | uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); | ||
| 146 | uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); | ||
| 147 | uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); | ||
| 148 | |||
| 149 | k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; | ||
| 150 | |||
| 151 | h1 = hash_rotl_32(h1, 19); h1 += h2; | ||
| 152 | h1 = h1*5 + 0x561ccd1b; | ||
| 153 | |||
| 154 | k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; | ||
| 155 | |||
| 156 | h2 = hash_rotl_32(h2, 17); h2 += h3; | ||
| 157 | h2 = h2*5 + 0x0bcaa747; | ||
| 158 | |||
| 159 | k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; | ||
| 160 | |||
| 161 | h3 = hash_rotl_32(h3, 15); h3 += h4; | ||
| 162 | h3 = h3*5 + 0x96cd1c35; | ||
| 163 | |||
| 164 | k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; | ||
| 165 | |||
| 166 | h4 = hash_rotl_32(h4, 13); h4 += h1; | ||
| 167 | h4 = h4*5 + 0x32ac3b17; | ||
| 168 | } | ||
| 169 | } | ||
| 170 | |||
| 171 | /* tail */ | ||
| 172 | { | ||
| 173 | const uint8_t *tail = (const uint8_t *) (data + nblocks*16); | ||
| 174 | uint32_t k1 = 0; | ||
| 175 | uint32_t k2 = 0; | ||
| 176 | uint32_t k3 = 0; | ||
| 177 | uint32_t k4 = 0; | ||
| 178 | |||
| 179 | switch (len & 15) { | ||
| 180 | case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH; | ||
| 181 | case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH; | ||
| 182 | case 13: k4 ^= tail[12] << 0; | ||
| 183 | k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; | ||
| 184 | JEMALLOC_FALLTHROUGH; | ||
| 185 | case 12: k3 ^= (uint32_t) tail[11] << 24; JEMALLOC_FALLTHROUGH; | ||
| 186 | case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH; | ||
| 187 | case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH; | ||
| 188 | case 9: k3 ^= tail[ 8] << 0; | ||
| 189 | k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; | ||
| 190 | JEMALLOC_FALLTHROUGH; | ||
| 191 | case 8: k2 ^= (uint32_t) tail[ 7] << 24; JEMALLOC_FALLTHROUGH; | ||
| 192 | case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH; | ||
| 193 | case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH; | ||
| 194 | case 5: k2 ^= tail[ 4] << 0; | ||
| 195 | k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; | ||
| 196 | JEMALLOC_FALLTHROUGH; | ||
| 197 | case 4: k1 ^= (uint32_t) tail[ 3] << 24; JEMALLOC_FALLTHROUGH; | ||
| 198 | case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH; | ||
| 199 | case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH; | ||
| 200 | case 1: k1 ^= tail[ 0] << 0; | ||
| 201 | k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; | ||
| 202 | break; | ||
| 203 | } | ||
| 204 | } | ||
| 205 | |||
| 206 | /* finalization */ | ||
| 207 | h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; | ||
| 208 | |||
| 209 | h1 += h2; h1 += h3; h1 += h4; | ||
| 210 | h2 += h1; h3 += h1; h4 += h1; | ||
| 211 | |||
| 212 | h1 = hash_fmix_32(h1); | ||
| 213 | h2 = hash_fmix_32(h2); | ||
| 214 | h3 = hash_fmix_32(h3); | ||
| 215 | h4 = hash_fmix_32(h4); | ||
| 216 | |||
| 217 | h1 += h2; h1 += h3; h1 += h4; | ||
| 218 | h2 += h1; h3 += h1; h4 += h1; | ||
| 219 | |||
| 220 | r_out[0] = (((uint64_t) h2) << 32) | h1; | ||
| 221 | r_out[1] = (((uint64_t) h4) << 32) | h3; | ||
| 222 | } | ||
| 223 | |||
| 224 | static inline void | ||
| 225 | hash_x64_128(const void *key, const int len, const uint32_t seed, | ||
| 226 | uint64_t r_out[2]) { | ||
| 227 | const uint8_t *data = (const uint8_t *) key; | ||
| 228 | const int nblocks = len / 16; | ||
| 229 | |||
| 230 | uint64_t h1 = seed; | ||
| 231 | uint64_t h2 = seed; | ||
| 232 | |||
| 233 | const uint64_t c1 = KQU(0x87c37b91114253d5); | ||
| 234 | const uint64_t c2 = KQU(0x4cf5ad432745937f); | ||
| 235 | |||
| 236 | /* body */ | ||
| 237 | { | ||
| 238 | const uint64_t *blocks = (const uint64_t *) (data); | ||
| 239 | int i; | ||
| 240 | |||
| 241 | for (i = 0; i < nblocks; i++) { | ||
| 242 | uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); | ||
| 243 | uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); | ||
| 244 | |||
| 245 | k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; | ||
| 246 | |||
| 247 | h1 = hash_rotl_64(h1, 27); h1 += h2; | ||
| 248 | h1 = h1*5 + 0x52dce729; | ||
| 249 | |||
| 250 | k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; | ||
| 251 | |||
| 252 | h2 = hash_rotl_64(h2, 31); h2 += h1; | ||
| 253 | h2 = h2*5 + 0x38495ab5; | ||
| 254 | } | ||
| 255 | } | ||
| 256 | |||
| 257 | /* tail */ | ||
| 258 | { | ||
| 259 | const uint8_t *tail = (const uint8_t*)(data + nblocks*16); | ||
| 260 | uint64_t k1 = 0; | ||
| 261 | uint64_t k2 = 0; | ||
| 262 | |||
| 263 | switch (len & 15) { | ||
| 264 | case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH; | ||
| 265 | case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH; | ||
| 266 | case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH; | ||
| 267 | case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH; | ||
| 268 | case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH; | ||
| 269 | case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH; | ||
| 270 | case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; | ||
| 271 | k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; | ||
| 272 | JEMALLOC_FALLTHROUGH; | ||
| 273 | case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH; | ||
| 274 | case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH; | ||
| 275 | case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH; | ||
| 276 | case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH; | ||
| 277 | case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH; | ||
| 278 | case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH; | ||
| 279 | case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH; | ||
| 280 | case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; | ||
| 281 | k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; | ||
| 282 | break; | ||
| 283 | } | ||
| 284 | } | ||
| 285 | |||
| 286 | /* finalization */ | ||
| 287 | h1 ^= len; h2 ^= len; | ||
| 288 | |||
| 289 | h1 += h2; | ||
| 290 | h2 += h1; | ||
| 291 | |||
| 292 | h1 = hash_fmix_64(h1); | ||
| 293 | h2 = hash_fmix_64(h2); | ||
| 294 | |||
| 295 | h1 += h2; | ||
| 296 | h2 += h1; | ||
| 297 | |||
| 298 | r_out[0] = h1; | ||
| 299 | r_out[1] = h2; | ||
| 300 | } | ||
| 301 | |||
| 302 | /******************************************************************************/ | ||
| 303 | /* API. */ | ||
| 304 | static inline void | ||
| 305 | hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) { | ||
| 306 | assert(len <= INT_MAX); /* Unfortunate implementation limitation. */ | ||
| 307 | |||
| 308 | #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) | ||
| 309 | hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash); | ||
| 310 | #else | ||
| 311 | { | ||
| 312 | uint64_t hashes[2]; | ||
| 313 | hash_x86_128(key, (int)len, seed, hashes); | ||
| 314 | r_hash[0] = (size_t)hashes[0]; | ||
| 315 | r_hash[1] = (size_t)hashes[1]; | ||
| 316 | } | ||
| 317 | #endif | ||
| 318 | } | ||
| 319 | |||
| 320 | #endif /* JEMALLOC_INTERNAL_HASH_H */ | ||
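One detail worth calling out from hash_get_block_32/64 above, independent of MurmurHash itself: unaligned input is read through memcpy instead of dereferencing a misaligned pointer, which would be undefined behaviour on some targets. A minimal standalone demonstration of that idiom (not using jemalloc's headers) follows; on typical compilers the memcpy is lowered to a plain load where such a load is legal.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a 32-bit value from a possibly unaligned address without UB. */
static uint32_t
load32(const void *p) {
	uint32_t v;
	memcpy(&v, p, sizeof(v));
	return v;
}

int
main(void) {
	unsigned char buf[8] = {0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0};
	/* &buf[1] is almost certainly not 4-byte aligned. */
	printf("0x%08x\n", load32(&buf[1])); /* 0x12345678 on little-endian hosts */
	return 0;
}
```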
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hook.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hook.h deleted file mode 100644 index ee246b1..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hook.h +++ /dev/null | |||
| @@ -1,163 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_HOOK_H | ||
| 2 | #define JEMALLOC_INTERNAL_HOOK_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/tsd.h" | ||
| 5 | |||
| 6 | /* | ||
| 7 | * This API is *extremely* experimental, and may get ripped out, changed in API- | ||
| 8 | * and ABI-incompatible ways, be insufficiently or incorrectly documented, etc. | ||
| 9 | * | ||
| 10 | * It allows hooking the stateful parts of the API to see changes as they | ||
| 11 | * happen. | ||
| 12 | * | ||
| 13 | * Allocation hooks are called after the allocation is done, free hooks are | ||
| 14 | * called before the free is done, and expand hooks are called after the | ||
| 15 | * allocation is expanded. | ||
| 16 | * | ||
| 17 | * For realloc and rallocx, if the expansion happens in place, the expansion | ||
| 18 | * hook is called. If it is moved, then the alloc hook is called on the new | ||
| 19 | * location, and then the free hook is called on the old location (i.e. both | ||
| 20 | * hooks are invoked in between the alloc and the dalloc). | ||
| 21 | * | ||
| 22 | * If we return NULL due to OOM, then usize might not be trustworthy. Calling | ||
| 23 | * realloc(NULL, size) only calls the alloc hook, and calling realloc(ptr, 0) | ||
| 24 | * only calls the free hook. (Calling realloc(NULL, 0) is treated as malloc(0), | ||
| 25 | * and only calls the alloc hook). | ||
| 26 | * | ||
| 27 | * Reentrancy: | ||
| 28 | * Reentrancy is guarded against from within the hook implementation. If you | ||
| 29 | * call allocator functions from within a hook, the hooks will not be invoked | ||
| 30 | * again. | ||
| 31 | * Threading: | ||
| 32 | * The installation of a hook synchronizes with all its uses. If you can | ||
| 33 | * prove the installation of a hook happens-before a jemalloc entry point, | ||
| 34 | * then the hook will get invoked (unless there's a racing removal). | ||
| 35 | * | ||
| 36 | * Hook insertion appears to be atomic at a per-thread level (i.e. if a thread | ||
| 37 | * allocates and has the alloc hook invoked, then a subsequent free on the | ||
| 38 | * same thread will also have the free hook invoked). | ||
| 39 | * | ||
| 40 | * The *removal* of a hook does *not* block until all threads are done with | ||
| 41 | * the hook. Hook authors have to be resilient to this, and need some | ||
| 42 | * out-of-band mechanism for cleaning up any dynamically allocated memory | ||
| 43 | * associated with their hook. | ||
| 44 | * Ordering: | ||
| 45 | * Order of hook execution is unspecified, and may be different than insertion | ||
| 46 | * order. | ||
| 47 | */ | ||
| 48 | |||
| 49 | #define HOOK_MAX 4 | ||
| 50 | |||
| 51 | enum hook_alloc_e { | ||
| 52 | hook_alloc_malloc, | ||
| 53 | hook_alloc_posix_memalign, | ||
| 54 | hook_alloc_aligned_alloc, | ||
| 55 | hook_alloc_calloc, | ||
| 56 | hook_alloc_memalign, | ||
| 57 | hook_alloc_valloc, | ||
| 58 | hook_alloc_mallocx, | ||
| 59 | |||
| 60 | /* The reallocating functions have both alloc and dalloc variants */ | ||
| 61 | hook_alloc_realloc, | ||
| 62 | hook_alloc_rallocx, | ||
| 63 | }; | ||
| 64 | /* | ||
| 65 | * We put the enum typedef after the enum, since this file may get included by | ||
| 66 | * jemalloc_cpp.cpp, and C++ disallows enum forward declarations. | ||
| 67 | */ | ||
| 68 | typedef enum hook_alloc_e hook_alloc_t; | ||
| 69 | |||
| 70 | enum hook_dalloc_e { | ||
| 71 | hook_dalloc_free, | ||
| 72 | hook_dalloc_dallocx, | ||
| 73 | hook_dalloc_sdallocx, | ||
| 74 | |||
| 75 | /* | ||
| 76 | * The dalloc halves of reallocation (not called if in-place expansion | ||
| 77 | * happens). | ||
| 78 | */ | ||
| 79 | hook_dalloc_realloc, | ||
| 80 | hook_dalloc_rallocx, | ||
| 81 | }; | ||
| 82 | typedef enum hook_dalloc_e hook_dalloc_t; | ||
| 83 | |||
| 84 | |||
| 85 | enum hook_expand_e { | ||
| 86 | hook_expand_realloc, | ||
| 87 | hook_expand_rallocx, | ||
| 88 | hook_expand_xallocx, | ||
| 89 | }; | ||
| 90 | typedef enum hook_expand_e hook_expand_t; | ||
| 91 | |||
| 92 | typedef void (*hook_alloc)( | ||
| 93 | void *extra, hook_alloc_t type, void *result, uintptr_t result_raw, | ||
| 94 | uintptr_t args_raw[3]); | ||
| 95 | |||
| 96 | typedef void (*hook_dalloc)( | ||
| 97 | void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]); | ||
| 98 | |||
| 99 | typedef void (*hook_expand)( | ||
| 100 | void *extra, hook_expand_t type, void *address, size_t old_usize, | ||
| 101 | size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]); | ||
| 102 | |||
| 103 | typedef struct hooks_s hooks_t; | ||
| 104 | struct hooks_s { | ||
| 105 | hook_alloc alloc_hook; | ||
| 106 | hook_dalloc dalloc_hook; | ||
| 107 | hook_expand expand_hook; | ||
| 108 | void *extra; | ||
| 109 | }; | ||
| 110 | |||
| 111 | /* | ||
| 112 | * Begin implementation details; everything above this point might one day live | ||
| 113 | * in a public API. Everything below this point never will. | ||
| 114 | */ | ||
| 115 | |||
| 116 | /* | ||
| 117 | * The realloc pathways haven't gotten any refactoring love in a while, and it's | ||
| 118 | * fairly difficult to pass information from the entry point to the hooks. We | ||
| 119 | * put the information the hooks will need into a struct to encapsulate | ||
| 120 | * everything. | ||
| 121 | * | ||
| 122 | * Many of these pathways are force-inlined, so that the compiler can avoid | ||
| 123 | * materializing this struct until we hit an extern arena function. For fairly | ||
| 124 | * goofy reasons, *many* of the realloc paths hit an extern arena function. | ||
| 125 | * These paths are cold enough that it doesn't matter; eventually, we should | ||
| 126 | * rewrite the realloc code to make the expand-in-place and the | ||
| 127 | * free-then-realloc paths more orthogonal, at which point we don't need to | ||
| 128 | * spread the hook logic all over the place. | ||
| 129 | */ | ||
| 130 | typedef struct hook_ralloc_args_s hook_ralloc_args_t; | ||
| 131 | struct hook_ralloc_args_s { | ||
| 132 | /* I.e. as opposed to rallocx. */ | ||
| 133 | bool is_realloc; | ||
| 134 | /* | ||
| 135 | * The expand hook takes 4 arguments, even if only 3 are actually used; | ||
| 136 | * we add an extra one in case the user decides to memcpy without | ||
| 137 | * looking too closely at the hooked function. | ||
| 138 | */ | ||
| 139 | uintptr_t args[4]; | ||
| 140 | }; | ||
| 141 | |||
| 142 | /* | ||
| 143 | * Returns an opaque handle to be used when removing the hook. NULL means that | ||
| 144 | * we couldn't install the hook. | ||
| 145 | */ | ||
| 146 | bool hook_boot(); | ||
| 147 | |||
| 148 | void *hook_install(tsdn_t *tsdn, hooks_t *hooks); | ||
| 149 | /* Uninstalls the hook with the handle previously returned from hook_install. */ | ||
| 150 | void hook_remove(tsdn_t *tsdn, void *opaque); | ||
| 151 | |||
| 152 | /* Hooks */ | ||
| 153 | |||
| 154 | void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw, | ||
| 155 | uintptr_t args_raw[3]); | ||
| 156 | |||
| 157 | void hook_invoke_dalloc(hook_dalloc_t type, void *address, | ||
| 158 | uintptr_t args_raw[3]); | ||
| 159 | |||
| 160 | void hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize, | ||
| 161 | size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]); | ||
| 162 | |||
| 163 | #endif /* JEMALLOC_INTERNAL_HOOK_H */ | ||
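Given the semantics spelled out above (hooks fire around stateful entry points, reentrancy is suppressed, removal is not synchronous), a client ends up looking roughly like the sketch below. It assumes the declarations from this header are in scope, since this is an internal API a real consumer would live inside jemalloc or its tests; the counting behaviour and the function names are mine, and no-op callbacks are supplied for the unused slots rather than assuming NULL members are tolerated.

```c
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

static atomic_uint_fast64_t g_allocs, g_dallocs;

/* Matches the hook_alloc typedef above; called after each allocation. */
static void
count_alloc(void *extra, hook_alloc_t type, void *result, uintptr_t result_raw,
    uintptr_t args_raw[3]) {
	(void)extra; (void)type; (void)result; (void)result_raw; (void)args_raw;
	atomic_fetch_add_explicit(&g_allocs, 1, memory_order_relaxed);
}

/* Matches the hook_dalloc typedef above; called before each free. */
static void
count_dalloc(void *extra, hook_dalloc_t type, void *address,
    uintptr_t args_raw[3]) {
	(void)extra; (void)type; (void)address; (void)args_raw;
	atomic_fetch_add_explicit(&g_dallocs, 1, memory_order_relaxed);
}

/* No-op expand hook, so every hooks_t slot is populated. */
static void
ignore_expand(void *extra, hook_expand_t type, void *address, size_t old_usize,
    size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
	(void)extra; (void)type; (void)address; (void)old_usize;
	(void)new_usize; (void)result_raw; (void)args_raw;
}

/*
 * Installation: returns the opaque handle to pass to hook_remove later.
 * The hooks_t is kept static so its lifetime is never in question.
 */
static void *
install_counting_hooks(tsdn_t *tsdn) {
	static hooks_t hooks;
	hooks.alloc_hook = count_alloc;
	hooks.dalloc_hook = count_dalloc;
	hooks.expand_hook = ignore_expand;
	hooks.extra = NULL;
	return hook_install(tsdn, &hooks); /* NULL means installation failed */
}
```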
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hpa.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hpa.h deleted file mode 100644 index f356285..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hpa.h +++ /dev/null | |||
| @@ -1,182 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_HPA_H | ||
| 2 | #define JEMALLOC_INTERNAL_HPA_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/exp_grow.h" | ||
| 5 | #include "jemalloc/internal/hpa_hooks.h" | ||
| 6 | #include "jemalloc/internal/hpa_opts.h" | ||
| 7 | #include "jemalloc/internal/pai.h" | ||
| 8 | #include "jemalloc/internal/psset.h" | ||
| 9 | |||
| 10 | typedef struct hpa_central_s hpa_central_t; | ||
| 11 | struct hpa_central_s { | ||
| 12 | /* | ||
| 13 | * The mutex guarding most of the operations on the central data | ||
| 14 | * structure. | ||
| 15 | */ | ||
| 16 | malloc_mutex_t mtx; | ||
| 17 | /* | ||
| 18 | * Guards expansion of eden. We separate this from the regular mutex so | ||
| 19 | * that cheaper operations can still continue while we're doing the OS | ||
| 20 | * call. | ||
| 21 | */ | ||
| 22 | malloc_mutex_t grow_mtx; | ||
| 23 | /* | ||
| 24 | * Either NULL (if empty), or some integer multiple of a | ||
| 25 | * hugepage-aligned number of hugepages. We carve them off one at a | ||
| 26 | * time to satisfy new pageslab requests. | ||
| 27 | * | ||
| 28 | * Guarded by grow_mtx. | ||
| 29 | */ | ||
| 30 | void *eden; | ||
| 31 | size_t eden_len; | ||
| 32 | /* Source for metadata. */ | ||
| 33 | base_t *base; | ||
| 34 | /* Number of grow operations done on this hpa_central_t. */ | ||
| 35 | uint64_t age_counter; | ||
| 36 | |||
| 37 | /* The HPA hooks. */ | ||
| 38 | hpa_hooks_t hooks; | ||
| 39 | }; | ||
| 40 | |||
| 41 | typedef struct hpa_shard_nonderived_stats_s hpa_shard_nonderived_stats_t; | ||
| 42 | struct hpa_shard_nonderived_stats_s { | ||
| 43 | /* | ||
| 44 | * The number of times we've purged within a hugepage. | ||
| 45 | * | ||
| 46 | * Guarded by mtx. | ||
| 47 | */ | ||
| 48 | uint64_t npurge_passes; | ||
| 49 | /* | ||
| 50 | * The number of individual purge calls we perform (which should always | ||
| 51 | * be bigger than npurge_passes, since each pass purges at least one | ||
| 52 | * extent within a hugepage). | ||
| 53 | * | ||
| 54 | * Guarded by mtx. | ||
| 55 | */ | ||
| 56 | uint64_t npurges; | ||
| 57 | |||
| 58 | /* | ||
| 59 | * The number of times we've hugified a pageslab. | ||
| 60 | * | ||
| 61 | * Guarded by mtx. | ||
| 62 | */ | ||
| 63 | uint64_t nhugifies; | ||
| 64 | /* | ||
| 65 | * The number of times we've dehugified a pageslab. | ||
| 66 | * | ||
| 67 | * Guarded by mtx. | ||
| 68 | */ | ||
| 69 | uint64_t ndehugifies; | ||
| 70 | }; | ||
| 71 | |||
| 72 | /* Completely derived; only used by CTL. */ | ||
| 73 | typedef struct hpa_shard_stats_s hpa_shard_stats_t; | ||
| 74 | struct hpa_shard_stats_s { | ||
| 75 | psset_stats_t psset_stats; | ||
| 76 | hpa_shard_nonderived_stats_t nonderived_stats; | ||
| 77 | }; | ||
| 78 | |||
| 79 | typedef struct hpa_shard_s hpa_shard_t; | ||
| 80 | struct hpa_shard_s { | ||
| 81 | /* | ||
| 82 | * pai must be the first member; we cast from a pointer to it to a | ||
| 83 | * pointer to the hpa_shard_t. | ||
| 84 | */ | ||
| 85 | pai_t pai; | ||
| 86 | |||
| 87 | /* The central allocator we get our hugepages from. */ | ||
| 88 | hpa_central_t *central; | ||
| 89 | /* Protects most of this shard's state. */ | ||
| 90 | malloc_mutex_t mtx; | ||
| 91 | /* | ||
| 92 | * Guards the shard's access to the central allocator (preventing | ||
| 93 | * multiple threads operating on this shard from accessing the central | ||
| 94 | * allocator). | ||
| 95 | */ | ||
| 96 | malloc_mutex_t grow_mtx; | ||
| 97 | /* The base metadata allocator. */ | ||
| 98 | base_t *base; | ||
| 99 | |||
| 100 | /* | ||
| 101 | * This edata cache is the one we use when allocating a small extent | ||
| 102 | * from a pageslab. The pageslab itself comes from the centralized | ||
| 103 | * allocator, and so will use its edata_cache. | ||
| 104 | */ | ||
| 105 | edata_cache_fast_t ecf; | ||
| 106 | |||
| 107 | psset_t psset; | ||
| 108 | |||
| 109 | /* | ||
| 110 | * How many grow operations have occurred. | ||
| 111 | * | ||
| 112 | * Guarded by grow_mtx. | ||
| 113 | */ | ||
| 114 | uint64_t age_counter; | ||
| 115 | |||
| 116 | /* The arena ind we're associated with. */ | ||
| 117 | unsigned ind; | ||
| 118 | |||
| 119 | /* | ||
| 120 | * Our emap. This is just a cache of the emap pointer in the associated | ||
| 121 | * hpa_central. | ||
| 122 | */ | ||
| 123 | emap_t *emap; | ||
| 124 | |||
| 125 | /* The configuration choices for this hpa shard. */ | ||
| 126 | hpa_shard_opts_t opts; | ||
| 127 | |||
| 128 | /* | ||
| 129 | * How many pages have we started but not yet finished purging in this | ||
| 130 | * hpa shard. | ||
| 131 | */ | ||
| 132 | size_t npending_purge; | ||
| 133 | |||
| 134 | /* | ||
| 135 | * Those stats which are copied directly into the CTL-centric hpa shard | ||
| 136 | * stats. | ||
| 137 | */ | ||
| 138 | hpa_shard_nonderived_stats_t stats; | ||
| 139 | |||
| 140 | /* | ||
| 141 | * Last time we performed purge on this shard. | ||
| 142 | */ | ||
| 143 | nstime_t last_purge; | ||
| 144 | }; | ||
| 145 | |||
| 146 | /* | ||
| 147 | * Whether or not the HPA can be used given the current configuration. This is | ||
| 148 | * not necessarily a guarantee that it backs its allocations with hugepages, | ||
| 149 | * just that it can function properly on the system it's running on. | ||
| 150 | */ | ||
| 151 | bool hpa_supported(); | ||
| 152 | bool hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks); | ||
| 153 | bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap, | ||
| 154 | base_t *base, edata_cache_t *edata_cache, unsigned ind, | ||
| 155 | const hpa_shard_opts_t *opts); | ||
| 156 | |||
| 157 | void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src); | ||
| 158 | void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard, | ||
| 159 | hpa_shard_stats_t *dst); | ||
| 160 | |||
| 161 | /* | ||
| 162 | * Notify the shard that we won't use it for allocations much longer. Due to | ||
| 163 | * the possibility of races, we don't actually prevent allocations; just flush | ||
| 164 | * and disable the embedded edata_cache_small. | ||
| 165 | */ | ||
| 166 | void hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard); | ||
| 167 | void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard); | ||
| 168 | |||
| 169 | void hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard, | ||
| 170 | bool deferral_allowed); | ||
| 171 | void hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard); | ||
| 172 | |||
| 173 | /* | ||
| 174 | * We share the fork ordering with the PA and arena prefork handling; that's why | ||
| 175 | * these are 3 and 4 rather than 0 and 1. | ||
| 176 | */ | ||
| 177 | void hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard); | ||
| 178 | void hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard); | ||
| 179 | void hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard); | ||
| 180 | void hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard); | ||
| 181 | |||
| 182 | #endif /* JEMALLOC_INTERNAL_HPA_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hpa_hooks.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hpa_hooks.h deleted file mode 100644 index 4ea221c..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hpa_hooks.h +++ /dev/null | |||
| @@ -1,17 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_HPA_HOOKS_H | ||
| 2 | #define JEMALLOC_INTERNAL_HPA_HOOKS_H | ||
| 3 | |||
| 4 | typedef struct hpa_hooks_s hpa_hooks_t; | ||
| 5 | struct hpa_hooks_s { | ||
| 6 | void *(*map)(size_t size); | ||
| 7 | void (*unmap)(void *ptr, size_t size); | ||
| 8 | void (*purge)(void *ptr, size_t size); | ||
| 9 | void (*hugify)(void *ptr, size_t size); | ||
| 10 | void (*dehugify)(void *ptr, size_t size); | ||
| 11 | void (*curtime)(nstime_t *r_time, bool first_reading); | ||
| 12 | uint64_t (*ms_since)(nstime_t *r_time); | ||
| 13 | }; | ||
| 14 | |||
| 15 | extern hpa_hooks_t hpa_hooks_default; | ||
| 16 | |||
| 17 | #endif /* JEMALLOC_INTERNAL_HPA_HOOKS_H */ | ||
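Note: hpa_hooks_t is a small vtable of OS-facing primitives, which lets tests and embedders substitute the map/unmap/purge operations. The fragment below is only an illustrative, self-contained Linux sketch of such a hook table built on mmap/munmap/madvise; it is not the actual hpa_hooks_default implementation, and the struct is a local stand-in invented for the example.

```c
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* Local stand-in for the hook table; real code would use hpa_hooks_t. */
struct demo_hooks {
	void *(*map)(size_t size);
	void (*unmap)(void *ptr, size_t size);
	void (*purge)(void *ptr, size_t size);
};

static void *demo_map(size_t size) {
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

static void demo_unmap(void *ptr, size_t size) {
	munmap(ptr, size);
}

/* "Purging" tells the kernel the pages' contents are no longer needed. */
static void demo_purge(void *ptr, size_t size) {
	madvise(ptr, size, MADV_DONTNEED);
}

static const struct demo_hooks hooks = { demo_map, demo_unmap, demo_purge };

int main(void) {
	size_t sz = 1 << 21; /* one typical x86-64 hugepage worth of bytes */
	void *p = hooks.map(sz);
	if (p == NULL) return 1;
	memset(p, 0xa5, sz); /* touch the pages */
	hooks.purge(p, sz);  /* give them back to the OS, keep the mapping */
	hooks.unmap(p, sz);
	return 0;
}
```

Routing these operations through a table rather than calling the OS directly is what makes it possible to stub out time and paging behavior in unit tests.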
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hpa_opts.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hpa_opts.h deleted file mode 100644 index ee84fea..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hpa_opts.h +++ /dev/null | |||
| @@ -1,74 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_HPA_OPTS_H | ||
| 2 | #define JEMALLOC_INTERNAL_HPA_OPTS_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/fxp.h" | ||
| 5 | |||
| 6 | /* | ||
| 7 | * This file is morally part of hpa.h, but is split out for header-ordering | ||
| 8 | * reasons. | ||
| 9 | */ | ||
| 10 | |||
| 11 | typedef struct hpa_shard_opts_s hpa_shard_opts_t; | ||
| 12 | struct hpa_shard_opts_s { | ||
| 13 | /* | ||
| 14 | * The largest size we'll allocate out of the shard. For those | ||
| 15 | * allocations refused, the caller (in practice, the PA module) will | ||
| 16 | * fall back to the more general (for now) PAC, which can always handle | ||
| 17 | * any allocation request. | ||
| 18 | */ | ||
| 19 | size_t slab_max_alloc; | ||
| 20 | |||
| 21 | /* | ||
| 22 | * When the number of active bytes in a hugepage is >= | ||
| 23 | * hugification_threshold, we force-hugify it. | ||
| 24 | */ | ||
| 25 | size_t hugification_threshold; | ||
| 26 | |||
| 27 | /* | ||
| 28 | * The HPA purges whenever the number of dirty pages exceeds dirty_mult * | ||
| 29 | * active_pages. This may be set to (fxp_t)-1 to disable purging. | ||
| 30 | */ | ||
| 31 | fxp_t dirty_mult; | ||
| 32 | |||
| 33 | /* | ||
| 34 | * Whether or not the PAI methods are allowed to defer work to a | ||
| 35 | * subsequent hpa_shard_do_deferred_work() call. Practically, this | ||
| 36 | * corresponds to background threads being enabled. We track this | ||
| 37 | * ourselves for encapsulation purposes. | ||
| 38 | */ | ||
| 39 | bool deferral_allowed; | ||
| 40 | |||
| 41 | /* | ||
| 42 | * How long a hugepage has to be a hugification candidate before it will | ||
| 43 | * actually get hugified. | ||
| 44 | */ | ||
| 45 | uint64_t hugify_delay_ms; | ||
| 46 | |||
| 47 | /* | ||
| 48 | * Minimum amount of time between purges. | ||
| 49 | */ | ||
| 50 | uint64_t min_purge_interval_ms; | ||
| 51 | }; | ||
| 52 | |||
| 53 | #define HPA_SHARD_OPTS_DEFAULT { \ | ||
| 54 | /* slab_max_alloc */ \ | ||
| 55 | 64 * 1024, \ | ||
| 56 | /* hugification_threshold */ \ | ||
| 57 | HUGEPAGE * 95 / 100, \ | ||
| 58 | /* dirty_mult */ \ | ||
| 59 | FXP_INIT_PERCENT(25), \ | ||
| 60 | /* \ | ||
| 61 | * deferral_allowed \ | ||
| 62 | * \ | ||
| 63 | * Really, this is always set by the arena during creation \ | ||
| 64 | * or by an hpa_shard_set_deferral_allowed call, so the value \ | ||
| 65 | * we put here doesn't matter. \ | ||
| 66 | */ \ | ||
| 67 | false, \ | ||
| 68 | /* hugify_delay_ms */ \ | ||
| 69 | 10 * 1000, \ | ||
| 70 | /* min_purge_interval_ms */ \ | ||
| 71 | 5 * 1000 \ | ||
| 72 | } | ||
| 73 | |||
| 74 | #endif /* JEMALLOC_INTERNAL_HPA_OPTS_H */ | ||
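Note: the defaults above encode the shard's policy thresholds: hugify once at least 95% of a hugepage is active, and purge once dirty pages exceed 25% of active pages (dirty_mult is a fixed-point fraction). The snippet below is a self-contained sketch of how such a check could be evaluated with a simple Q16.16 fixed-point value; the demo_fxp_t type and macro are simplified stand-ins, not jemalloc's fxp_t.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified Q16.16 fixed point, standing in for jemalloc's fxp_t. */
typedef uint32_t demo_fxp_t;
#define DEMO_FXP_PERCENT(p) ((demo_fxp_t)(((uint64_t)(p) << 16) / 100))

/* Purge when ndirty > dirty_mult * nactive (dirty_mult defaults to 25%). */
static bool should_purge(size_t ndirty, size_t nactive, demo_fxp_t dirty_mult) {
	uint64_t threshold = ((uint64_t)nactive * dirty_mult) >> 16;
	return (uint64_t)ndirty > threshold;
}

int main(void) {
	demo_fxp_t dirty_mult = DEMO_FXP_PERCENT(25);
	printf("%d\n", should_purge(100, 1000, dirty_mult)); /* 0: 100 <= 250 */
	printf("%d\n", should_purge(300, 1000, dirty_mult)); /* 1: 300 >  250 */
	return 0;
}
```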
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hpdata.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hpdata.h deleted file mode 100644 index 1fb534d..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/hpdata.h +++ /dev/null | |||
| @@ -1,413 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_HPDATA_H | ||
| 2 | #define JEMALLOC_INTERNAL_HPDATA_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/fb.h" | ||
| 5 | #include "jemalloc/internal/ph.h" | ||
| 6 | #include "jemalloc/internal/ql.h" | ||
| 7 | #include "jemalloc/internal/typed_list.h" | ||
| 8 | |||
| 9 | /* | ||
| 10 | * The metadata representation we use for extents in hugepages. While the PAC | ||
| 11 | * uses the edata_t to represent both active and inactive extents, the HP only | ||
| 12 | * uses the edata_t for active ones; instead, inactive extent state is tracked | ||
| 13 | * within hpdata associated with the enclosing hugepage-sized, hugepage-aligned | ||
| 14 | * region of virtual address space. | ||
| 15 | * | ||
| 16 | * An hpdata need not be "truly" backed by a hugepage (which is not necessarily | ||
| 17 | * an observable property of any given region of address space). It's just | ||
| 18 | * hugepage-sized and hugepage-aligned; it's *potentially* huge. | ||
| 19 | */ | ||
| 20 | typedef struct hpdata_s hpdata_t; | ||
| 21 | ph_structs(hpdata_age_heap, hpdata_t); | ||
| 22 | struct hpdata_s { | ||
| 23 | /* | ||
| 24 | * We likewise follow the edata convention of mangling names and forcing | ||
| 25 | * the use of accessors -- this lets us add some consistency checks on | ||
| 26 | * access. | ||
| 27 | */ | ||
| 28 | |||
| 29 | /* | ||
| 30 | * The address of the hugepage in question. This can't be named h_addr, | ||
| 31 | * since that conflicts with a macro defined in Windows headers. | ||
| 32 | */ | ||
| 33 | void *h_address; | ||
| 34 | /* Its age (measured in psset operations). */ | ||
| 35 | uint64_t h_age; | ||
| 36 | /* Whether or not we think the hugepage is mapped that way by the OS. */ | ||
| 37 | bool h_huge; | ||
| 38 | |||
| 39 | /* | ||
| 40 | * For some properties, we keep parallel sets of bools; h_foo_allowed | ||
| 41 | * and h_in_psset_foo_container. This is a decoupling mechanism that keeps | ||
| 42 | * the hpa (which manages policies) separate from the psset | ||
| 43 | * (which is the mechanism used to enforce those policies). This allows | ||
| 44 | * all the container management logic to live in one place, without the | ||
| 45 | * HPA needing to know or care how that happens. | ||
| 46 | */ | ||
| 47 | |||
| 48 | /* | ||
| 49 | * Whether or not the hpdata is allowed to be used to serve allocations, | ||
| 50 | * and whether or not the psset is currently tracking it as such. | ||
| 51 | */ | ||
| 52 | bool h_alloc_allowed; | ||
| 53 | bool h_in_psset_alloc_container; | ||
| 54 | |||
| 55 | /* | ||
| 56 | * The same, but with purging. There's no corresponding | ||
| 57 | * h_in_psset_purge_container, because the psset (currently) always | ||
| 58 | * removes hpdatas from their containers during updates (to implement | ||
| 59 | * LRU for purging). | ||
| 60 | */ | ||
| 61 | bool h_purge_allowed; | ||
| 62 | |||
| 63 | /* And with hugifying. */ | ||
| 64 | bool h_hugify_allowed; | ||
| 65 | /* When we became a hugification candidate. */ | ||
| 66 | nstime_t h_time_hugify_allowed; | ||
| 67 | bool h_in_psset_hugify_container; | ||
| 68 | |||
| 69 | /* Whether or not a purge or hugify is currently happening. */ | ||
| 70 | bool h_mid_purge; | ||
| 71 | bool h_mid_hugify; | ||
| 72 | |||
| 73 | /* | ||
| 74 | * Whether or not the hpdata is being updated in the psset (i.e. if | ||
| 75 | * there has been a psset_update_begin call issued without a matching | ||
| 76 | * psset_update_end call). Eventually this will expand to other types | ||
| 77 | * of updates. | ||
| 78 | */ | ||
| 79 | bool h_updating; | ||
| 80 | |||
| 81 | /* Whether or not the hpdata is in a psset. */ | ||
| 82 | bool h_in_psset; | ||
| 83 | |||
| 84 | union { | ||
| 85 | /* When nonempty (and also nonfull), used by the psset bins. */ | ||
| 86 | hpdata_age_heap_link_t age_link; | ||
| 87 | /* | ||
| 88 | * When empty (or not corresponding to any hugepage), list | ||
| 89 | * linkage. | ||
| 90 | */ | ||
| 91 | ql_elm(hpdata_t) ql_link_empty; | ||
| 92 | }; | ||
| 93 | |||
| 94 | /* | ||
| 95 | * Linkage for the psset to track candidates for purging and hugifying. | ||
| 96 | */ | ||
| 97 | ql_elm(hpdata_t) ql_link_purge; | ||
| 98 | ql_elm(hpdata_t) ql_link_hugify; | ||
| 99 | |||
| 100 | /* The length of the largest contiguous sequence of inactive pages. */ | ||
| 101 | size_t h_longest_free_range; | ||
| 102 | |||
| 103 | /* Number of active pages. */ | ||
| 104 | size_t h_nactive; | ||
| 105 | |||
| 106 | /* A bitmap with bits set in the active pages. */ | ||
| 107 | fb_group_t active_pages[FB_NGROUPS(HUGEPAGE_PAGES)]; | ||
| 108 | |||
| 109 | /* | ||
| 110 | * Number of dirty or active pages, and a bitmap tracking them. One | ||
| 111 | * way to think of this is as which pages are dirty from the OS's | ||
| 112 | * perspective. | ||
| 113 | */ | ||
| 114 | size_t h_ntouched; | ||
| 115 | |||
| 116 | /* The touched pages (using the same definition as above). */ | ||
| 117 | fb_group_t touched_pages[FB_NGROUPS(HUGEPAGE_PAGES)]; | ||
| 118 | }; | ||
| 119 | |||
| 120 | TYPED_LIST(hpdata_empty_list, hpdata_t, ql_link_empty) | ||
| 121 | TYPED_LIST(hpdata_purge_list, hpdata_t, ql_link_purge) | ||
| 122 | TYPED_LIST(hpdata_hugify_list, hpdata_t, ql_link_hugify) | ||
| 123 | |||
| 124 | ph_proto(, hpdata_age_heap, hpdata_t); | ||
| 125 | |||
| 126 | static inline void * | ||
| 127 | hpdata_addr_get(const hpdata_t *hpdata) { | ||
| 128 | return hpdata->h_address; | ||
| 129 | } | ||
| 130 | |||
| 131 | static inline void | ||
| 132 | hpdata_addr_set(hpdata_t *hpdata, void *addr) { | ||
| 133 | assert(HUGEPAGE_ADDR2BASE(addr) == addr); | ||
| 134 | hpdata->h_address = addr; | ||
| 135 | } | ||
| 136 | |||
| 137 | static inline uint64_t | ||
| 138 | hpdata_age_get(const hpdata_t *hpdata) { | ||
| 139 | return hpdata->h_age; | ||
| 140 | } | ||
| 141 | |||
| 142 | static inline void | ||
| 143 | hpdata_age_set(hpdata_t *hpdata, uint64_t age) { | ||
| 144 | hpdata->h_age = age; | ||
| 145 | } | ||
| 146 | |||
| 147 | static inline bool | ||
| 148 | hpdata_huge_get(const hpdata_t *hpdata) { | ||
| 149 | return hpdata->h_huge; | ||
| 150 | } | ||
| 151 | |||
| 152 | static inline bool | ||
| 153 | hpdata_alloc_allowed_get(const hpdata_t *hpdata) { | ||
| 154 | return hpdata->h_alloc_allowed; | ||
| 155 | } | ||
| 156 | |||
| 157 | static inline void | ||
| 158 | hpdata_alloc_allowed_set(hpdata_t *hpdata, bool alloc_allowed) { | ||
| 159 | hpdata->h_alloc_allowed = alloc_allowed; | ||
| 160 | } | ||
| 161 | |||
| 162 | static inline bool | ||
| 163 | hpdata_in_psset_alloc_container_get(const hpdata_t *hpdata) { | ||
| 164 | return hpdata->h_in_psset_alloc_container; | ||
| 165 | } | ||
| 166 | |||
| 167 | static inline void | ||
| 168 | hpdata_in_psset_alloc_container_set(hpdata_t *hpdata, bool in_container) { | ||
| 169 | assert(in_container != hpdata->h_in_psset_alloc_container); | ||
| 170 | hpdata->h_in_psset_alloc_container = in_container; | ||
| 171 | } | ||
| 172 | |||
| 173 | static inline bool | ||
| 174 | hpdata_purge_allowed_get(const hpdata_t *hpdata) { | ||
| 175 | return hpdata->h_purge_allowed; | ||
| 176 | } | ||
| 177 | |||
| 178 | static inline void | ||
| 179 | hpdata_purge_allowed_set(hpdata_t *hpdata, bool purge_allowed) { | ||
| 180 | assert(purge_allowed == false || !hpdata->h_mid_purge); | ||
| 181 | hpdata->h_purge_allowed = purge_allowed; | ||
| 182 | } | ||
| 183 | |||
| 184 | static inline bool | ||
| 185 | hpdata_hugify_allowed_get(const hpdata_t *hpdata) { | ||
| 186 | return hpdata->h_hugify_allowed; | ||
| 187 | } | ||
| 188 | |||
| 189 | static inline void | ||
| 190 | hpdata_allow_hugify(hpdata_t *hpdata, nstime_t now) { | ||
| 191 | assert(!hpdata->h_mid_hugify); | ||
| 192 | hpdata->h_hugify_allowed = true; | ||
| 193 | hpdata->h_time_hugify_allowed = now; | ||
| 194 | } | ||
| 195 | |||
| 196 | static inline nstime_t | ||
| 197 | hpdata_time_hugify_allowed(hpdata_t *hpdata) { | ||
| 198 | return hpdata->h_time_hugify_allowed; | ||
| 199 | } | ||
| 200 | |||
| 201 | static inline void | ||
| 202 | hpdata_disallow_hugify(hpdata_t *hpdata) { | ||
| 203 | hpdata->h_hugify_allowed = false; | ||
| 204 | } | ||
| 205 | |||
| 206 | static inline bool | ||
| 207 | hpdata_in_psset_hugify_container_get(const hpdata_t *hpdata) { | ||
| 208 | return hpdata->h_in_psset_hugify_container; | ||
| 209 | } | ||
| 210 | |||
| 211 | static inline void | ||
| 212 | hpdata_in_psset_hugify_container_set(hpdata_t *hpdata, bool in_container) { | ||
| 213 | assert(in_container != hpdata->h_in_psset_hugify_container); | ||
| 214 | hpdata->h_in_psset_hugify_container = in_container; | ||
| 215 | } | ||
| 216 | |||
| 217 | static inline bool | ||
| 218 | hpdata_mid_purge_get(const hpdata_t *hpdata) { | ||
| 219 | return hpdata->h_mid_purge; | ||
| 220 | } | ||
| 221 | |||
| 222 | static inline void | ||
| 223 | hpdata_mid_purge_set(hpdata_t *hpdata, bool mid_purge) { | ||
| 224 | assert(mid_purge != hpdata->h_mid_purge); | ||
| 225 | hpdata->h_mid_purge = mid_purge; | ||
| 226 | } | ||
| 227 | |||
| 228 | static inline bool | ||
| 229 | hpdata_mid_hugify_get(const hpdata_t *hpdata) { | ||
| 230 | return hpdata->h_mid_hugify; | ||
| 231 | } | ||
| 232 | |||
| 233 | static inline void | ||
| 234 | hpdata_mid_hugify_set(hpdata_t *hpdata, bool mid_hugify) { | ||
| 235 | assert(mid_hugify != hpdata->h_mid_hugify); | ||
| 236 | hpdata->h_mid_hugify = mid_hugify; | ||
| 237 | } | ||
| 238 | |||
| 239 | static inline bool | ||
| 240 | hpdata_changing_state_get(const hpdata_t *hpdata) { | ||
| 241 | return hpdata->h_mid_purge || hpdata->h_mid_hugify; | ||
| 242 | } | ||
| 243 | |||
| 244 | |||
| 245 | static inline bool | ||
| 246 | hpdata_updating_get(const hpdata_t *hpdata) { | ||
| 247 | return hpdata->h_updating; | ||
| 248 | } | ||
| 249 | |||
| 250 | static inline void | ||
| 251 | hpdata_updating_set(hpdata_t *hpdata, bool updating) { | ||
| 252 | assert(updating != hpdata->h_updating); | ||
| 253 | hpdata->h_updating = updating; | ||
| 254 | } | ||
| 255 | |||
| 256 | static inline bool | ||
| 257 | hpdata_in_psset_get(const hpdata_t *hpdata) { | ||
| 258 | return hpdata->h_in_psset; | ||
| 259 | } | ||
| 260 | |||
| 261 | static inline void | ||
| 262 | hpdata_in_psset_set(hpdata_t *hpdata, bool in_psset) { | ||
| 263 | assert(in_psset != hpdata->h_in_psset); | ||
| 264 | hpdata->h_in_psset = in_psset; | ||
| 265 | } | ||
| 266 | |||
| 267 | static inline size_t | ||
| 268 | hpdata_longest_free_range_get(const hpdata_t *hpdata) { | ||
| 269 | return hpdata->h_longest_free_range; | ||
| 270 | } | ||
| 271 | |||
| 272 | static inline void | ||
| 273 | hpdata_longest_free_range_set(hpdata_t *hpdata, size_t longest_free_range) { | ||
| 274 | assert(longest_free_range <= HUGEPAGE_PAGES); | ||
| 275 | hpdata->h_longest_free_range = longest_free_range; | ||
| 276 | } | ||
| 277 | |||
| 278 | static inline size_t | ||
| 279 | hpdata_nactive_get(hpdata_t *hpdata) { | ||
| 280 | return hpdata->h_nactive; | ||
| 281 | } | ||
| 282 | |||
| 283 | static inline size_t | ||
| 284 | hpdata_ntouched_get(hpdata_t *hpdata) { | ||
| 285 | return hpdata->h_ntouched; | ||
| 286 | } | ||
| 287 | |||
| 288 | static inline size_t | ||
| 289 | hpdata_ndirty_get(hpdata_t *hpdata) { | ||
| 290 | return hpdata->h_ntouched - hpdata->h_nactive; | ||
| 291 | } | ||
| 292 | |||
| 293 | static inline size_t | ||
| 294 | hpdata_nretained_get(hpdata_t *hpdata) { | ||
| 295 | return HUGEPAGE_PAGES - hpdata->h_ntouched; | ||
| 296 | } | ||
| 297 | |||
| 298 | static inline void | ||
| 299 | hpdata_assert_empty(hpdata_t *hpdata) { | ||
| 300 | assert(fb_empty(hpdata->active_pages, HUGEPAGE_PAGES)); | ||
| 301 | assert(hpdata->h_nactive == 0); | ||
| 302 | } | ||
| 303 | |||
| 304 | /* | ||
| 305 | * Only used in tests, and in hpdata_assert_consistent, below. Verifies some | ||
| 306 | * consistency properties of the hpdata (e.g. that cached counts of page stats | ||
| 307 | * match computed ones). | ||
| 308 | */ | ||
| 309 | static inline bool | ||
| 310 | hpdata_consistent(hpdata_t *hpdata) { | ||
| 311 | if(fb_urange_longest(hpdata->active_pages, HUGEPAGE_PAGES) | ||
| 312 | != hpdata_longest_free_range_get(hpdata)) { | ||
| 313 | return false; | ||
| 314 | } | ||
| 315 | if (fb_scount(hpdata->active_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES) | ||
| 316 | != hpdata->h_nactive) { | ||
| 317 | return false; | ||
| 318 | } | ||
| 319 | if (fb_scount(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES) | ||
| 320 | != hpdata->h_ntouched) { | ||
| 321 | return false; | ||
| 322 | } | ||
| 323 | if (hpdata->h_ntouched < hpdata->h_nactive) { | ||
| 324 | return false; | ||
| 325 | } | ||
| 326 | if (hpdata->h_huge && hpdata->h_ntouched != HUGEPAGE_PAGES) { | ||
| 327 | return false; | ||
| 328 | } | ||
| 329 | if (hpdata_changing_state_get(hpdata) | ||
| 330 | && ((hpdata->h_purge_allowed) || hpdata->h_hugify_allowed)) { | ||
| 331 | return false; | ||
| 332 | } | ||
| 333 | if (hpdata_hugify_allowed_get(hpdata) | ||
| 334 | != hpdata_in_psset_hugify_container_get(hpdata)) { | ||
| 335 | return false; | ||
| 336 | } | ||
| 337 | return true; | ||
| 338 | } | ||
| 339 | |||
| 340 | static inline void | ||
| 341 | hpdata_assert_consistent(hpdata_t *hpdata) { | ||
| 342 | assert(hpdata_consistent(hpdata)); | ||
| 343 | } | ||
| 344 | |||
| 345 | static inline bool | ||
| 346 | hpdata_empty(hpdata_t *hpdata) { | ||
| 347 | return hpdata->h_nactive == 0; | ||
| 348 | } | ||
| 349 | |||
| 350 | static inline bool | ||
| 351 | hpdata_full(hpdata_t *hpdata) { | ||
| 352 | return hpdata->h_nactive == HUGEPAGE_PAGES; | ||
| 353 | } | ||
| 354 | |||
| 355 | void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age); | ||
| 356 | |||
| 357 | /* | ||
| 358 | * Given an hpdata which can serve an allocation request, pick and reserve an | ||
| 359 | * offset within that hpdata for the allocation. | ||
| 360 | */ | ||
| 361 | void *hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz); | ||
| 362 | void hpdata_unreserve(hpdata_t *hpdata, void *begin, size_t sz); | ||
| 363 | |||
| 364 | /* | ||
| 365 | * The hpdata_purge_state_t allows grabbing the metadata required to purge | ||
| 366 | * subranges of a hugepage while holding a lock, dropping the lock during the | ||
| 367 | * actual purging, and reacquiring it to update the metadata again. | ||
| 368 | */ | ||
| 369 | typedef struct hpdata_purge_state_s hpdata_purge_state_t; | ||
| 370 | struct hpdata_purge_state_s { | ||
| 371 | size_t npurged; | ||
| 372 | size_t ndirty_to_purge; | ||
| 373 | fb_group_t to_purge[FB_NGROUPS(HUGEPAGE_PAGES)]; | ||
| 374 | size_t next_purge_search_begin; | ||
| 375 | }; | ||
| 376 | |||
| 377 | /* | ||
| 378 | * Initializes purge state. The access to hpdata must be externally | ||
| 379 | * synchronized with other hpdata_* calls. | ||
| 380 | * | ||
| 381 | * You can tell whether or not a thread is purging or hugifying a given hpdata | ||
| 382 | * via hpdata_changing_state_get(hpdata). Racing hugification or purging | ||
| 383 | * operations aren't allowed. | ||
| 384 | * | ||
| 385 | * Once you begin purging, you have to follow through and call hpdata_purge_next | ||
| 386 | * until you're done, and then call hpdata_purge_end. Allocating out of an | ||
| 387 | * hpdata undergoing purging is not allowed. | ||
| 388 | * | ||
| 389 | * Returns the number of dirty pages that will be purged. | ||
| 390 | */ | ||
| 391 | size_t hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state); | ||
| 392 | |||
| 393 | /* | ||
| 394 | * If there are more extents to purge, sets *r_purge_addr and *r_purge_size to | ||
| 395 | * the address and size of the next range to purge, and returns true. | ||
| 396 | * Otherwise, returns false to indicate that we're done. | ||
| 397 | * | ||
| 398 | * This requires exclusive access to the purge state, but *not* to the hpdata. | ||
| 399 | * In particular, unreserve calls are allowed while purging (i.e. you can dalloc | ||
| 400 | * into one part of the hpdata while purging a different part). | ||
| 401 | */ | ||
| 402 | bool hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state, | ||
| 403 | void **r_purge_addr, size_t *r_purge_size); | ||
| 404 | /* | ||
| 405 | * Updates the hpdata metadata after all purging is done. Needs external | ||
| 406 | * synchronization. | ||
| 407 | */ | ||
| 408 | void hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state); | ||
| 409 | |||
| 410 | void hpdata_hugify(hpdata_t *hpdata); | ||
| 411 | void hpdata_dehugify(hpdata_t *hpdata); | ||
| 412 | |||
| 413 | #endif /* JEMALLOC_INTERNAL_HPDATA_H */ | ||
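Note: hpdata_purge_begin/next/end describe a three-phase protocol: snapshot which pages to purge while a lock is held, walk the dirty ranges with the lock dropped, then update metadata under the lock again. The following is a self-contained, simplified sketch of the middle step only, iterating maximal runs of flagged pages in a small array; it is not jemalloc's fb_group_t bitmap machinery.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NPAGES 16

/* Simplified purge state: a per-page "needs purge" flag plus a cursor. */
struct demo_purge_state {
	bool to_purge[NPAGES];
	size_t next;
};

/* Like hpdata_purge_next: report the next contiguous run to purge, if any. */
static bool purge_next(struct demo_purge_state *st, size_t *r_begin,
    size_t *r_len) {
	while (st->next < NPAGES && !st->to_purge[st->next]) {
		st->next++;
	}
	if (st->next == NPAGES) {
		return false;
	}
	size_t begin = st->next;
	while (st->next < NPAGES && st->to_purge[st->next]) {
		st->next++;
	}
	*r_begin = begin;
	*r_len = st->next - begin;
	return true;
}

int main(void) {
	struct demo_purge_state st = { .next = 0 };
	st.to_purge[2] = st.to_purge[3] = st.to_purge[9] = true;

	size_t begin, len;
	while (purge_next(&st, &begin, &len)) {
		/* A real implementation would madvise() this page range here. */
		printf("purge pages [%zu, %zu)\n", begin, begin + len);
	}
	return 0;
}
```

Because the iteration only reads the snapshot taken at begin time, concurrent deallocations into other parts of the hugepage stay safe, which is exactly the property the header comments call out.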
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/inspect.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/inspect.h deleted file mode 100644 index 65fef51..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/inspect.h +++ /dev/null | |||
| @@ -1,40 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_INSPECT_H | ||
| 2 | #define JEMALLOC_INTERNAL_INSPECT_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * This module contains the heap introspection capabilities. For now they are | ||
| 6 | * exposed purely through mallctl APIs in the experimental namespace, but this | ||
| 7 | * may change over time. | ||
| 8 | */ | ||
| 9 | |||
| 10 | /* | ||
| 11 | * The following two structs are for experimental purposes. See | ||
| 12 | * experimental_utilization_query_ctl and | ||
| 13 | * experimental_utilization_batch_query_ctl in src/ctl.c. | ||
| 14 | */ | ||
| 15 | typedef struct inspect_extent_util_stats_s inspect_extent_util_stats_t; | ||
| 16 | struct inspect_extent_util_stats_s { | ||
| 17 | size_t nfree; | ||
| 18 | size_t nregs; | ||
| 19 | size_t size; | ||
| 20 | }; | ||
| 21 | |||
| 22 | typedef struct inspect_extent_util_stats_verbose_s | ||
| 23 | inspect_extent_util_stats_verbose_t; | ||
| 24 | |||
| 25 | struct inspect_extent_util_stats_verbose_s { | ||
| 26 | void *slabcur_addr; | ||
| 27 | size_t nfree; | ||
| 28 | size_t nregs; | ||
| 29 | size_t size; | ||
| 30 | size_t bin_nfree; | ||
| 31 | size_t bin_nregs; | ||
| 32 | }; | ||
| 33 | |||
| 34 | void inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, | ||
| 35 | size_t *nfree, size_t *nregs, size_t *size); | ||
| 36 | void inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr, | ||
| 37 | size_t *nfree, size_t *nregs, size_t *size, | ||
| 38 | size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr); | ||
| 39 | |||
| 40 | #endif /* JEMALLOC_INTERNAL_INSPECT_H */ | ||
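Note: the query structs above report, per extent, the number of free and total regions plus the extent size. As a worked example of how a consumer might turn those fields into a utilization figure, here is a self-contained sketch using a local stand-in struct (not the mallctl plumbing itself).

```c
#include <stddef.h>
#include <stdio.h>

/* Local stand-in mirroring inspect_extent_util_stats_s. */
struct demo_util_stats {
	size_t nfree;
	size_t nregs;
	size_t size;
};

int main(void) {
	struct demo_util_stats st = { .nfree = 40, .nregs = 128, .size = 4096 };
	double util = st.nregs == 0
	    ? 0.0 : (double)(st.nregs - st.nfree) / (double)st.nregs;
	printf("extent of %zu bytes: %zu/%zu regions used (%.1f%% utilized)\n",
	    st.size, st.nregs - st.nfree, st.nregs, util * 100.0);
	return 0;
}
```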
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h deleted file mode 100644 index 983027c..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h +++ /dev/null | |||
| @@ -1,108 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_DECLS_H | ||
| 2 | #define JEMALLOC_INTERNAL_DECLS_H | ||
| 3 | |||
| 4 | #include <math.h> | ||
| 5 | #ifdef _WIN32 | ||
| 6 | # include <windows.h> | ||
| 7 | # include "msvc_compat/windows_extra.h" | ||
| 8 | # include "msvc_compat/strings.h" | ||
| 9 | # ifdef _WIN64 | ||
| 10 | # if LG_VADDR <= 32 | ||
| 11 | # error Generate the headers using x64 vcargs | ||
| 12 | # endif | ||
| 13 | # else | ||
| 14 | # if LG_VADDR > 32 | ||
| 15 | # undef LG_VADDR | ||
| 16 | # define LG_VADDR 32 | ||
| 17 | # endif | ||
| 18 | # endif | ||
| 19 | #else | ||
| 20 | # include <sys/param.h> | ||
| 21 | # include <sys/mman.h> | ||
| 22 | # if !defined(__pnacl__) && !defined(__native_client__) | ||
| 23 | # include <sys/syscall.h> | ||
| 24 | # if !defined(SYS_write) && defined(__NR_write) | ||
| 25 | # define SYS_write __NR_write | ||
| 26 | # endif | ||
| 27 | # if defined(SYS_open) && defined(__aarch64__) | ||
| 28 | /* Android headers may define SYS_open to __NR_open even though | ||
| 29 | * __NR_open may not exist on AArch64 (superseded by __NR_openat). */ | ||
| 30 | # undef SYS_open | ||
| 31 | # endif | ||
| 32 | # include <sys/uio.h> | ||
| 33 | # endif | ||
| 34 | # include <pthread.h> | ||
| 35 | # if defined(__FreeBSD__) || defined(__DragonFly__) | ||
| 36 | # include <pthread_np.h> | ||
| 37 | # include <sched.h> | ||
| 38 | # if defined(__FreeBSD__) | ||
| 39 | # define cpu_set_t cpuset_t | ||
| 40 | # endif | ||
| 41 | # endif | ||
| 42 | # include <signal.h> | ||
| 43 | # ifdef JEMALLOC_OS_UNFAIR_LOCK | ||
| 44 | # include <os/lock.h> | ||
| 45 | # endif | ||
| 46 | # ifdef JEMALLOC_GLIBC_MALLOC_HOOK | ||
| 47 | # include <sched.h> | ||
| 48 | # endif | ||
| 49 | # include <errno.h> | ||
| 50 | # include <sys/time.h> | ||
| 51 | # include <time.h> | ||
| 52 | # ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME | ||
| 53 | # include <mach/mach_time.h> | ||
| 54 | # endif | ||
| 55 | #endif | ||
| 56 | #include <sys/types.h> | ||
| 57 | |||
| 58 | #include <limits.h> | ||
| 59 | #ifndef SIZE_T_MAX | ||
| 60 | # define SIZE_T_MAX SIZE_MAX | ||
| 61 | #endif | ||
| 62 | #ifndef SSIZE_MAX | ||
| 63 | # define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1)) | ||
| 64 | #endif | ||
| 65 | #include <stdarg.h> | ||
| 66 | #include <stdbool.h> | ||
| 67 | #include <stdio.h> | ||
| 68 | #include <stdlib.h> | ||
| 69 | #include <stdint.h> | ||
| 70 | #include <stddef.h> | ||
| 71 | #ifndef offsetof | ||
| 72 | # define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) | ||
| 73 | #endif | ||
| 74 | #include <string.h> | ||
| 75 | #include <strings.h> | ||
| 76 | #include <ctype.h> | ||
| 77 | #ifdef _MSC_VER | ||
| 78 | # include <io.h> | ||
| 79 | typedef intptr_t ssize_t; | ||
| 80 | # define PATH_MAX 1024 | ||
| 81 | # define STDERR_FILENO 2 | ||
| 82 | # define __func__ __FUNCTION__ | ||
| 83 | # ifdef JEMALLOC_HAS_RESTRICT | ||
| 84 | # define restrict __restrict | ||
| 85 | # endif | ||
| 86 | /* Disable warnings about deprecated system functions. */ | ||
| 87 | # pragma warning(disable: 4996) | ||
| 88 | #if _MSC_VER < 1800 | ||
| 89 | static int | ||
| 90 | isblank(int c) { | ||
| 91 | return (c == '\t' || c == ' '); | ||
| 92 | } | ||
| 93 | #endif | ||
| 94 | #else | ||
| 95 | # include <unistd.h> | ||
| 96 | #endif | ||
| 97 | #include <fcntl.h> | ||
| 98 | |||
| 99 | /* | ||
| 100 | * The Win32 midl compiler has #define small char; we don't use midl, but | ||
| 101 | * "small" is a nice identifier to have available when talking about size | ||
| 102 | * classes. | ||
| 103 | */ | ||
| 104 | #ifdef small | ||
| 105 | # undef small | ||
| 106 | #endif | ||
| 107 | |||
| 108 | #endif /* JEMALLOC_INTERNAL_DECLS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in deleted file mode 100644 index 3588072..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in +++ /dev/null | |||
| @@ -1,427 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_DEFS_H_ | ||
| 2 | #define JEMALLOC_INTERNAL_DEFS_H_ | ||
| 3 | /* | ||
| 4 | * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all | ||
| 5 | * public APIs to be prefixed. This makes it possible, with some care, to use | ||
| 6 | * multiple allocators simultaneously. | ||
| 7 | */ | ||
| 8 | #undef JEMALLOC_PREFIX | ||
| 9 | #undef JEMALLOC_CPREFIX | ||
| 10 | |||
| 11 | /* | ||
| 12 | * Define overrides for non-standard allocator-related functions if they are | ||
| 13 | * present on the system. | ||
| 14 | */ | ||
| 15 | #undef JEMALLOC_OVERRIDE___LIBC_CALLOC | ||
| 16 | #undef JEMALLOC_OVERRIDE___LIBC_FREE | ||
| 17 | #undef JEMALLOC_OVERRIDE___LIBC_MALLOC | ||
| 18 | #undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN | ||
| 19 | #undef JEMALLOC_OVERRIDE___LIBC_REALLOC | ||
| 20 | #undef JEMALLOC_OVERRIDE___LIBC_VALLOC | ||
| 21 | #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN | ||
| 22 | |||
| 23 | /* | ||
| 24 | * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. | ||
| 25 | * For shared libraries, symbol visibility mechanisms prevent these symbols | ||
| 26 | * from being exported, but for static libraries, naming collisions are a real | ||
| 27 | * possibility. | ||
| 28 | */ | ||
| 29 | #undef JEMALLOC_PRIVATE_NAMESPACE | ||
| 30 | |||
| 31 | /* | ||
| 32 | * Hyper-threaded CPUs may need a special instruction inside spin loops in | ||
| 33 | * order to yield to another virtual CPU. | ||
| 34 | */ | ||
| 35 | #undef CPU_SPINWAIT | ||
| 36 | /* 1 if CPU_SPINWAIT is defined, 0 otherwise. */ | ||
| 37 | #undef HAVE_CPU_SPINWAIT | ||
| 38 | |||
| 39 | /* | ||
| 40 | * Number of significant bits in virtual addresses. This may be less than the | ||
| 41 | * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 | ||
| 42 | * bits are the same as bit 47. | ||
| 43 | */ | ||
| 44 | #undef LG_VADDR | ||
| 45 | |||
| 46 | /* Defined if C11 atomics are available. */ | ||
| 47 | #undef JEMALLOC_C11_ATOMICS | ||
| 48 | |||
| 49 | /* Defined if GCC __atomic atomics are available. */ | ||
| 50 | #undef JEMALLOC_GCC_ATOMIC_ATOMICS | ||
| 51 | /* and the 8-bit variant support. */ | ||
| 52 | #undef JEMALLOC_GCC_U8_ATOMIC_ATOMICS | ||
| 53 | |||
| 54 | /* Defined if GCC __sync atomics are available. */ | ||
| 55 | #undef JEMALLOC_GCC_SYNC_ATOMICS | ||
| 56 | /* and the 8-bit variant support. */ | ||
| 57 | #undef JEMALLOC_GCC_U8_SYNC_ATOMICS | ||
| 58 | |||
| 59 | /* | ||
| 60 | * Defined if __builtin_clz() and __builtin_clzl() are available. | ||
| 61 | */ | ||
| 62 | #undef JEMALLOC_HAVE_BUILTIN_CLZ | ||
| 63 | |||
| 64 | /* | ||
| 65 | * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. | ||
| 66 | */ | ||
| 67 | #undef JEMALLOC_OS_UNFAIR_LOCK | ||
| 68 | |||
| 69 | /* Defined if syscall(2) is usable. */ | ||
| 70 | #undef JEMALLOC_USE_SYSCALL | ||
| 71 | |||
| 72 | /* | ||
| 73 | * Defined if secure_getenv(3) is available. | ||
| 74 | */ | ||
| 75 | #undef JEMALLOC_HAVE_SECURE_GETENV | ||
| 76 | |||
| 77 | /* | ||
| 78 | * Defined if issetugid(2) is available. | ||
| 79 | */ | ||
| 80 | #undef JEMALLOC_HAVE_ISSETUGID | ||
| 81 | |||
| 82 | /* Defined if pthread_atfork(3) is available. */ | ||
| 83 | #undef JEMALLOC_HAVE_PTHREAD_ATFORK | ||
| 84 | |||
| 85 | /* Defined if pthread_setname_np(3) is available. */ | ||
| 86 | #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP | ||
| 87 | |||
| 88 | /* Defined if pthread_getname_np(3) is available. */ | ||
| 89 | #undef JEMALLOC_HAVE_PTHREAD_GETNAME_NP | ||
| 90 | |||
| 91 | /* Defined if pthread_get_name_np(3) is available. */ | ||
| 92 | #undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP | ||
| 93 | |||
| 94 | /* | ||
| 95 | * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. | ||
| 96 | */ | ||
| 97 | #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE | ||
| 98 | |||
| 99 | /* | ||
| 100 | * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. | ||
| 101 | */ | ||
| 102 | #undef JEMALLOC_HAVE_CLOCK_MONOTONIC | ||
| 103 | |||
| 104 | /* | ||
| 105 | * Defined if mach_absolute_time() is available. | ||
| 106 | */ | ||
| 107 | #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME | ||
| 108 | |||
| 109 | /* | ||
| 110 | * Defined if clock_gettime(CLOCK_REALTIME, ...) is available. | ||
| 111 | */ | ||
| 112 | #undef JEMALLOC_HAVE_CLOCK_REALTIME | ||
| 113 | |||
| 114 | /* | ||
| 115 | * Defined if _malloc_thread_cleanup() exists. At least in the case of | ||
| 116 | * FreeBSD, pthread_key_create() allocates, which if used during malloc | ||
| 117 | * bootstrapping will cause recursion into the pthreads library. Therefore, if | ||
| 118 | * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in | ||
| 119 | * malloc_tsd. | ||
| 120 | */ | ||
| 121 | #undef JEMALLOC_MALLOC_THREAD_CLEANUP | ||
| 122 | |||
| 123 | /* | ||
| 124 | * Defined if threaded initialization is known to be safe on this platform. | ||
| 125 | * Among other things, it must be possible to initialize a mutex without | ||
| 126 | * triggering allocation in order for threaded allocation to be safe. | ||
| 127 | */ | ||
| 128 | #undef JEMALLOC_THREADED_INIT | ||
| 129 | |||
| 130 | /* | ||
| 131 | * Defined if the pthreads implementation defines | ||
| 132 | * _pthread_mutex_init_calloc_cb(), in which case the function is used in order | ||
| 133 | * to avoid recursive allocation during mutex initialization. | ||
| 134 | */ | ||
| 135 | #undef JEMALLOC_MUTEX_INIT_CB | ||
| 136 | |||
| 137 | /* Non-empty if the tls_model attribute is supported. */ | ||
| 138 | #undef JEMALLOC_TLS_MODEL | ||
| 139 | |||
| 140 | /* | ||
| 141 | * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables | ||
| 142 | * inline functions. | ||
| 143 | */ | ||
| 144 | #undef JEMALLOC_DEBUG | ||
| 145 | |||
| 146 | /* JEMALLOC_STATS enables statistics calculation. */ | ||
| 147 | #undef JEMALLOC_STATS | ||
| 148 | |||
| 149 | /* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */ | ||
| 150 | #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API | ||
| 151 | |||
| 152 | /* JEMALLOC_PROF enables allocation profiling. */ | ||
| 153 | #undef JEMALLOC_PROF | ||
| 154 | |||
| 155 | /* Use libunwind for profile backtracing if defined. */ | ||
| 156 | #undef JEMALLOC_PROF_LIBUNWIND | ||
| 157 | |||
| 158 | /* Use libgcc for profile backtracing if defined. */ | ||
| 159 | #undef JEMALLOC_PROF_LIBGCC | ||
| 160 | |||
| 161 | /* Use gcc intrinsics for profile backtracing if defined. */ | ||
| 162 | #undef JEMALLOC_PROF_GCC | ||
| 163 | |||
| 164 | /* | ||
| 165 | * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage | ||
| 166 | * segment (DSS). | ||
| 167 | */ | ||
| 168 | #undef JEMALLOC_DSS | ||
| 169 | |||
| 170 | /* Support memory filling (junk/zero). */ | ||
| 171 | #undef JEMALLOC_FILL | ||
| 172 | |||
| 173 | /* Support utrace(2)-based tracing. */ | ||
| 174 | #undef JEMALLOC_UTRACE | ||
| 175 | |||
| 176 | /* Support utrace(2)-based tracing (label based signature). */ | ||
| 177 | #undef JEMALLOC_UTRACE_LABEL | ||
| 178 | |||
| 179 | /* Support optional abort() on OOM. */ | ||
| 180 | #undef JEMALLOC_XMALLOC | ||
| 181 | |||
| 182 | /* Support lazy locking (avoid locking unless a second thread is launched). */ | ||
| 183 | #undef JEMALLOC_LAZY_LOCK | ||
| 184 | |||
| 185 | /* | ||
| 186 | * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size | ||
| 187 | * classes). | ||
| 188 | */ | ||
| 189 | #undef LG_QUANTUM | ||
| 190 | |||
| 191 | /* One page is 2^LG_PAGE bytes. */ | ||
| 192 | #undef LG_PAGE | ||
| 193 | |||
| 194 | /* Maximum number of regions in a slab. */ | ||
| 195 | #undef CONFIG_LG_SLAB_MAXREGS | ||
| 196 | |||
| 197 | /* | ||
| 198 | * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the | ||
| 199 | * system does not explicitly support huge pages; system calls that require | ||
| 200 | * explicit huge page support are separately configured. | ||
| 201 | */ | ||
| 202 | #undef LG_HUGEPAGE | ||
| 203 | |||
| 204 | /* | ||
| 205 | * If defined, adjacent virtual memory mappings with identical attributes | ||
| 206 | * automatically coalesce, and they fragment when changes are made to subranges. | ||
| 207 | * This is the normal order of things for mmap()/munmap(), but on Windows | ||
| 208 | * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. | ||
| 209 | * mappings do *not* coalesce/fragment. | ||
| 210 | */ | ||
| 211 | #undef JEMALLOC_MAPS_COALESCE | ||
| 212 | |||
| 213 | /* | ||
| 214 | * If defined, retain memory for later reuse by default rather than using e.g. | ||
| 215 | * munmap() to unmap freed extents. This is enabled on 64-bit Linux because | ||
| 216 | * common sequences of mmap()/munmap() calls will cause virtual memory map | ||
| 217 | * holes. | ||
| 218 | */ | ||
| 219 | #undef JEMALLOC_RETAIN | ||
| 220 | |||
| 221 | /* TLS is used to map arenas and magazine caches to threads. */ | ||
| 222 | #undef JEMALLOC_TLS | ||
| 223 | |||
| 224 | /* | ||
| 225 | * Used to mark unreachable code to quiet "end of non-void" compiler warnings. | ||
| 226 | * Don't use this directly; instead use unreachable() from util.h | ||
| 227 | */ | ||
| 228 | #undef JEMALLOC_INTERNAL_UNREACHABLE | ||
| 229 | |||
| 230 | /* | ||
| 231 | * ffs*() functions to use for bitmapping. Don't use these directly; instead, | ||
| 232 | * use ffs_*() from util.h. | ||
| 233 | */ | ||
| 234 | #undef JEMALLOC_INTERNAL_FFSLL | ||
| 235 | #undef JEMALLOC_INTERNAL_FFSL | ||
| 236 | #undef JEMALLOC_INTERNAL_FFS | ||
| 237 | |||
| 238 | /* | ||
| 239 | * popcount*() functions to use for bitmapping. | ||
| 240 | */ | ||
| 241 | #undef JEMALLOC_INTERNAL_POPCOUNTL | ||
| 242 | #undef JEMALLOC_INTERNAL_POPCOUNT | ||
| 243 | |||
| 244 | /* | ||
| 245 | * If defined, explicitly attempt to more uniformly distribute large allocation | ||
| 246 | * pointer alignments across all cache indices. | ||
| 247 | */ | ||
| 248 | #undef JEMALLOC_CACHE_OBLIVIOUS | ||
| 249 | |||
| 250 | /* | ||
| 251 | * If defined, enable logging facilities. We make this a configure option to | ||
| 252 | * avoid taking extra branches everywhere. | ||
| 253 | */ | ||
| 254 | #undef JEMALLOC_LOG | ||
| 255 | |||
| 256 | /* | ||
| 257 | * If defined, use readlinkat() (instead of readlink()) to follow | ||
| 258 | * /etc/malloc_conf. | ||
| 259 | */ | ||
| 260 | #undef JEMALLOC_READLINKAT | ||
| 261 | |||
| 262 | /* | ||
| 263 | * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. | ||
| 264 | */ | ||
| 265 | #undef JEMALLOC_ZONE | ||
| 266 | |||
| 267 | /* | ||
| 268 | * Methods for determining whether the OS overcommits. | ||
| 269 | * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's | ||
| 270 | * /proc/sys/vm/overcommit_memory file. | ||
| 271 | * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. | ||
| 272 | */ | ||
| 273 | #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT | ||
| 274 | #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY | ||
| 275 | |||
| 276 | /* Defined if madvise(2) is available. */ | ||
| 277 | #undef JEMALLOC_HAVE_MADVISE | ||
| 278 | |||
| 279 | /* | ||
| 280 | * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE | ||
| 281 | * arguments to madvise(2). | ||
| 282 | */ | ||
| 283 | #undef JEMALLOC_HAVE_MADVISE_HUGE | ||
| 284 | |||
| 285 | /* | ||
| 286 | * Methods for purging unused pages differ between operating systems. | ||
| 287 | * | ||
| 288 | * madvise(..., MADV_FREE) : This marks pages as being unused, such that they | ||
| 289 | * will be discarded rather than swapped out. | ||
| 290 | * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is | ||
| 291 | * defined, this immediately discards pages, | ||
| 292 | * such that new pages will be demand-zeroed if | ||
| 293 | * the address region is later touched; | ||
| 294 | * otherwise this behaves similarly to | ||
| 295 | * MADV_FREE, though typically with higher | ||
| 296 | * system overhead. | ||
| 297 | */ | ||
| 298 | #undef JEMALLOC_PURGE_MADVISE_FREE | ||
| 299 | #undef JEMALLOC_PURGE_MADVISE_DONTNEED | ||
| 300 | #undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS | ||
| 301 | |||
| 302 | /* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */ | ||
| 303 | #undef JEMALLOC_DEFINE_MADVISE_FREE | ||
| 304 | |||
| 305 | /* | ||
| 306 | * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise. | ||
| 307 | */ | ||
| 308 | #undef JEMALLOC_MADVISE_DONTDUMP | ||
| 309 | |||
| 310 | /* | ||
| 311 | * Defined if MADV_[NO]CORE is supported as an argument to madvise. | ||
| 312 | */ | ||
| 313 | #undef JEMALLOC_MADVISE_NOCORE | ||
| 314 | |||
| 315 | /* Defined if mprotect(2) is available. */ | ||
| 316 | #undef JEMALLOC_HAVE_MPROTECT | ||
| 317 | |||
| 318 | /* | ||
| 319 | * Defined if transparent huge pages (THPs) are supported via the | ||
| 320 | * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled. | ||
| 321 | */ | ||
| 322 | #undef JEMALLOC_THP | ||
| 323 | |||
| 324 | /* Defined if posix_madvise is available. */ | ||
| 325 | #undef JEMALLOC_HAVE_POSIX_MADVISE | ||
| 326 | |||
| 327 | /* | ||
| 328 | * Method for purging unused pages using posix_madvise. | ||
| 329 | * | ||
| 330 | * posix_madvise(..., POSIX_MADV_DONTNEED) | ||
| 331 | */ | ||
| 332 | #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED | ||
| 333 | #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS | ||
| 334 | |||
| 335 | /* | ||
| 336 | * Defined if memcntl page admin call is supported | ||
| 337 | */ | ||
| 338 | #undef JEMALLOC_HAVE_MEMCNTL | ||
| 339 | |||
| 340 | /* | ||
| 341 | * Defined if malloc_size is supported | ||
| 342 | */ | ||
| 343 | #undef JEMALLOC_HAVE_MALLOC_SIZE | ||
| 344 | |||
| 345 | /* Define if operating system has alloca.h header. */ | ||
| 346 | #undef JEMALLOC_HAS_ALLOCA_H | ||
| 347 | |||
| 348 | /* C99 restrict keyword supported. */ | ||
| 349 | #undef JEMALLOC_HAS_RESTRICT | ||
| 350 | |||
| 351 | /* For use by hash code. */ | ||
| 352 | #undef JEMALLOC_BIG_ENDIAN | ||
| 353 | |||
| 354 | /* sizeof(int) == 2^LG_SIZEOF_INT. */ | ||
| 355 | #undef LG_SIZEOF_INT | ||
| 356 | |||
| 357 | /* sizeof(long) == 2^LG_SIZEOF_LONG. */ | ||
| 358 | #undef LG_SIZEOF_LONG | ||
| 359 | |||
| 360 | /* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ | ||
| 361 | #undef LG_SIZEOF_LONG_LONG | ||
| 362 | |||
| 363 | /* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ | ||
| 364 | #undef LG_SIZEOF_INTMAX_T | ||
| 365 | |||
| 366 | /* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ | ||
| 367 | #undef JEMALLOC_GLIBC_MALLOC_HOOK | ||
| 368 | |||
| 369 | /* glibc memalign hook. */ | ||
| 370 | #undef JEMALLOC_GLIBC_MEMALIGN_HOOK | ||
| 371 | |||
| 372 | /* pthread support */ | ||
| 373 | #undef JEMALLOC_HAVE_PTHREAD | ||
| 374 | |||
| 375 | /* dlsym() support */ | ||
| 376 | #undef JEMALLOC_HAVE_DLSYM | ||
| 377 | |||
| 378 | /* Adaptive mutex support in pthreads. */ | ||
| 379 | #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP | ||
| 380 | |||
| 381 | /* GNU specific sched_getcpu support */ | ||
| 382 | #undef JEMALLOC_HAVE_SCHED_GETCPU | ||
| 383 | |||
| 384 | /* GNU specific sched_setaffinity support */ | ||
| 385 | #undef JEMALLOC_HAVE_SCHED_SETAFFINITY | ||
| 386 | |||
| 387 | /* | ||
| 388 | * If defined, all the features necessary for background threads are present. | ||
| 389 | */ | ||
| 390 | #undef JEMALLOC_BACKGROUND_THREAD | ||
| 391 | |||
| 392 | /* | ||
| 393 | * If defined, jemalloc symbols are not exported (doesn't work when | ||
| 394 | * JEMALLOC_PREFIX is not defined). | ||
| 395 | */ | ||
| 396 | #undef JEMALLOC_EXPORT | ||
| 397 | |||
| 398 | /* config.malloc_conf options string. */ | ||
| 399 | #undef JEMALLOC_CONFIG_MALLOC_CONF | ||
| 400 | |||
| 401 | /* If defined, jemalloc takes the malloc/free/etc. symbol names. */ | ||
| 402 | #undef JEMALLOC_IS_MALLOC | ||
| 403 | |||
| 404 | /* | ||
| 405 | * Defined if strerror_r returns char * if _GNU_SOURCE is defined. | ||
| 406 | */ | ||
| 407 | #undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE | ||
| 408 | |||
| 409 | /* Performs additional safety checks when defined. */ | ||
| 410 | #undef JEMALLOC_OPT_SAFETY_CHECKS | ||
| 411 | |||
| 412 | /* Is C++ support being built? */ | ||
| 413 | #undef JEMALLOC_ENABLE_CXX | ||
| 414 | |||
| 415 | /* Performs additional size checks when defined. */ | ||
| 416 | #undef JEMALLOC_OPT_SIZE_CHECKS | ||
| 417 | |||
| 418 | /* Allows sampled junk and stash for checking use-after-free when defined. */ | ||
| 419 | #undef JEMALLOC_UAF_DETECTION | ||
| 420 | |||
| 421 | /* Darwin VM_MAKE_TAG support */ | ||
| 422 | #undef JEMALLOC_HAVE_VM_MAKE_TAG | ||
| 423 | |||
| 424 | /* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */ | ||
| 425 | #undef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE | ||
| 426 | |||
| 427 | #endif /* JEMALLOC_INTERNAL_DEFS_H_ */ | ||
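Note: the JEMALLOC_PURGE_MADVISE_* settings above select between the two purging flavors the comment describes: MADV_DONTNEED discards anonymous pages immediately (later reads see zeros), while MADV_FREE only marks them reclaimable. A minimal, self-contained Linux demo of the DONTNEED behavior, independent of jemalloc:

```c
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void) {
	size_t sz = 4096 * 4;
	unsigned char *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) return 1;

	memset(p, 0xff, sz);
	printf("before purge: %#x\n", (unsigned)p[0]); /* 0xff */

	/* Discard the pages; the mapping stays valid but loses its contents. */
	madvise(p, sz, MADV_DONTNEED);
	printf("after purge:  %#x\n", (unsigned)p[0]); /* 0: demand-zeroed */

	munmap(p, sz);
	return 0;
}
```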
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h deleted file mode 100644 index 0f179d2..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h +++ /dev/null | |||
| @@ -1,75 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_EXTERNS_H | ||
| 2 | #define JEMALLOC_INTERNAL_EXTERNS_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/atomic.h" | ||
| 5 | #include "jemalloc/internal/hpa_opts.h" | ||
| 6 | #include "jemalloc/internal/sec_opts.h" | ||
| 7 | #include "jemalloc/internal/tsd_types.h" | ||
| 8 | #include "jemalloc/internal/nstime.h" | ||
| 9 | |||
| 10 | /* TSD checks this to set thread local slow state accordingly. */ | ||
| 11 | extern bool malloc_slow; | ||
| 12 | |||
| 13 | /* Run-time options. */ | ||
| 14 | extern bool opt_abort; | ||
| 15 | extern bool opt_abort_conf; | ||
| 16 | extern bool opt_trust_madvise; | ||
| 17 | extern bool opt_confirm_conf; | ||
| 18 | extern bool opt_hpa; | ||
| 19 | extern hpa_shard_opts_t opt_hpa_opts; | ||
| 20 | extern sec_opts_t opt_hpa_sec_opts; | ||
| 21 | |||
| 22 | extern const char *opt_junk; | ||
| 23 | extern bool opt_junk_alloc; | ||
| 24 | extern bool opt_junk_free; | ||
| 25 | extern void (*junk_free_callback)(void *ptr, size_t size); | ||
| 26 | extern void (*junk_alloc_callback)(void *ptr, size_t size); | ||
| 27 | extern bool opt_utrace; | ||
| 28 | extern bool opt_xmalloc; | ||
| 29 | extern bool opt_experimental_infallible_new; | ||
| 30 | extern bool opt_zero; | ||
| 31 | extern unsigned opt_narenas; | ||
| 32 | extern zero_realloc_action_t opt_zero_realloc_action; | ||
| 33 | extern malloc_init_t malloc_init_state; | ||
| 34 | extern const char *zero_realloc_mode_names[]; | ||
| 35 | extern atomic_zu_t zero_realloc_count; | ||
| 36 | extern bool opt_cache_oblivious; | ||
| 37 | |||
| 38 | /* Escape free-fastpath when ptr & mask == 0 (for sanitization purpose). */ | ||
| 39 | extern uintptr_t san_cache_bin_nonfast_mask; | ||
| 40 | |||
| 41 | /* Number of CPUs. */ | ||
| 42 | extern unsigned ncpus; | ||
| 43 | |||
| 44 | /* Number of arenas used for automatic multiplexing of threads and arenas. */ | ||
| 45 | extern unsigned narenas_auto; | ||
| 46 | |||
| 47 | /* Base index for manual arenas. */ | ||
| 48 | extern unsigned manual_arena_base; | ||
| 49 | |||
| 50 | /* | ||
| 51 | * Arenas that are used to service external requests. Not all elements of the | ||
| 52 | * arenas array are necessarily used; arenas are created lazily as needed. | ||
| 53 | */ | ||
| 54 | extern atomic_p_t arenas[]; | ||
| 55 | |||
| 56 | void *a0malloc(size_t size); | ||
| 57 | void a0dalloc(void *ptr); | ||
| 58 | void *bootstrap_malloc(size_t size); | ||
| 59 | void *bootstrap_calloc(size_t num, size_t size); | ||
| 60 | void bootstrap_free(void *ptr); | ||
| 61 | void arena_set(unsigned ind, arena_t *arena); | ||
| 62 | unsigned narenas_total_get(void); | ||
| 63 | arena_t *arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config); | ||
| 64 | arena_t *arena_choose_hard(tsd_t *tsd, bool internal); | ||
| 65 | void arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena); | ||
| 66 | void iarena_cleanup(tsd_t *tsd); | ||
| 67 | void arena_cleanup(tsd_t *tsd); | ||
| 68 | size_t batch_alloc(void **ptrs, size_t num, size_t size, int flags); | ||
| 69 | void jemalloc_prefork(void); | ||
| 70 | void jemalloc_postfork_parent(void); | ||
| 71 | void jemalloc_postfork_child(void); | ||
| 72 | void je_sdallocx_noflags(void *ptr, size_t size); | ||
| 73 | void *malloc_default(size_t size, size_t *usize); | ||
| 74 | |||
| 75 | #endif /* JEMALLOC_INTERNAL_EXTERNS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h deleted file mode 100644 index 751c112..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h +++ /dev/null | |||
| @@ -1,84 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_INCLUDES_H | ||
| 2 | #define JEMALLOC_INTERNAL_INCLUDES_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * jemalloc can conceptually be broken into components (arena, tcache, etc.), | ||
| 6 | * but there are circular dependencies that cannot be broken without | ||
| 7 | * substantial performance degradation. | ||
| 8 | * | ||
| 9 | * Historically, we dealt with this by splitting each header into four sections (types, | ||
| 10 | * structs, externs, and inlines), and included each header file multiple times | ||
| 11 | * in this file, picking out the portion we want on each pass using the | ||
| 12 | * following #defines: | ||
| 13 | * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data | ||
| 14 | * types. | ||
| 15 | * JEMALLOC_H_STRUCTS : Data structures. | ||
| 16 | * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. | ||
| 17 | * JEMALLOC_H_INLINES : Inline functions. | ||
| 18 | * | ||
| 19 | * We're moving toward a world in which the dependencies are explicit; each file | ||
| 20 | * will #include the headers it depends on (rather than relying on them being | ||
| 21 | * implicitly available via this file including every header file in the | ||
| 22 | * project). | ||
| 23 | * | ||
| 24 | * We're now in an intermediate state: we've broken up the header files to avoid | ||
| 25 | * having to include each one multiple times, but have not yet moved the | ||
| 26 | * dependency information into the header files (i.e. we still rely on the | ||
| 27 | * ordering in this file to ensure all a header's dependencies are available in | ||
| 28 | * its translation unit). Each component is now broken up into multiple header | ||
| 29 | * files, corresponding to the sections above (e.g. instead of "foo.h", we now | ||
| 30 | * have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h"). | ||
| 31 | * | ||
| 32 | * Those files which have been converted to explicitly include their | ||
| 33 | * inter-component dependencies are now in the initial HERMETIC HEADERS | ||
| 34 | * section. All headers may still rely on jemalloc_preamble.h (which, by fiat, | ||
| 35 | * must be included first in every translation unit) for system headers and | ||
| 36 | * global jemalloc definitions, however. | ||
| 37 | */ | ||
| 38 | |||
| 39 | /******************************************************************************/ | ||
| 40 | /* TYPES */ | ||
| 41 | /******************************************************************************/ | ||
| 42 | |||
| 43 | #include "jemalloc/internal/arena_types.h" | ||
| 44 | #include "jemalloc/internal/tcache_types.h" | ||
| 45 | #include "jemalloc/internal/prof_types.h" | ||
| 46 | |||
| 47 | /******************************************************************************/ | ||
| 48 | /* STRUCTS */ | ||
| 49 | /******************************************************************************/ | ||
| 50 | |||
| 51 | #include "jemalloc/internal/prof_structs.h" | ||
| 52 | #include "jemalloc/internal/arena_structs.h" | ||
| 53 | #include "jemalloc/internal/tcache_structs.h" | ||
| 54 | #include "jemalloc/internal/background_thread_structs.h" | ||
| 55 | |||
| 56 | /******************************************************************************/ | ||
| 57 | /* EXTERNS */ | ||
| 58 | /******************************************************************************/ | ||
| 59 | |||
| 60 | #include "jemalloc/internal/jemalloc_internal_externs.h" | ||
| 61 | #include "jemalloc/internal/arena_externs.h" | ||
| 62 | #include "jemalloc/internal/large_externs.h" | ||
| 63 | #include "jemalloc/internal/tcache_externs.h" | ||
| 64 | #include "jemalloc/internal/prof_externs.h" | ||
| 65 | #include "jemalloc/internal/background_thread_externs.h" | ||
| 66 | |||
| 67 | /******************************************************************************/ | ||
| 68 | /* INLINES */ | ||
| 69 | /******************************************************************************/ | ||
| 70 | |||
| 71 | #include "jemalloc/internal/jemalloc_internal_inlines_a.h" | ||
| 72 | /* | ||
| 73 | * Include portions of arena code interleaved with tcache code in order to | ||
| 74 | * resolve circular dependencies. | ||
| 75 | */ | ||
| 76 | #include "jemalloc/internal/arena_inlines_a.h" | ||
| 77 | #include "jemalloc/internal/jemalloc_internal_inlines_b.h" | ||
| 78 | #include "jemalloc/internal/tcache_inlines.h" | ||
| 79 | #include "jemalloc/internal/arena_inlines_b.h" | ||
| 80 | #include "jemalloc/internal/jemalloc_internal_inlines_c.h" | ||
| 81 | #include "jemalloc/internal/prof_inlines.h" | ||
| 82 | #include "jemalloc/internal/background_thread_inlines.h" | ||
| 83 | |||
| 84 | #endif /* JEMALLOC_INTERNAL_INCLUDES_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h deleted file mode 100644 index 9e27cc3..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h +++ /dev/null | |||
| @@ -1,122 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_INLINES_A_H | ||
| 2 | #define JEMALLOC_INTERNAL_INLINES_A_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/atomic.h" | ||
| 5 | #include "jemalloc/internal/bit_util.h" | ||
| 6 | #include "jemalloc/internal/jemalloc_internal_types.h" | ||
| 7 | #include "jemalloc/internal/sc.h" | ||
| 8 | #include "jemalloc/internal/ticker.h" | ||
| 9 | |||
| 10 | JEMALLOC_ALWAYS_INLINE malloc_cpuid_t | ||
| 11 | malloc_getcpu(void) { | ||
| 12 | assert(have_percpu_arena); | ||
| 13 | #if defined(_WIN32) | ||
| 14 | return GetCurrentProcessorNumber(); | ||
| 15 | #elif defined(JEMALLOC_HAVE_SCHED_GETCPU) | ||
| 16 | return (malloc_cpuid_t)sched_getcpu(); | ||
| 17 | #else | ||
| 18 | not_reached(); | ||
| 19 | return -1; | ||
| 20 | #endif | ||
| 21 | } | ||
| 22 | |||
| 23 | /* Return the chosen arena index based on current cpu. */ | ||
| 24 | JEMALLOC_ALWAYS_INLINE unsigned | ||
| 25 | percpu_arena_choose(void) { | ||
| 26 | assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)); | ||
| 27 | |||
| 28 | malloc_cpuid_t cpuid = malloc_getcpu(); | ||
| 29 | assert(cpuid >= 0); | ||
| 30 | |||
| 31 | unsigned arena_ind; | ||
| 32 | if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus / | ||
| 33 | 2)) { | ||
| 34 | arena_ind = cpuid; | ||
| 35 | } else { | ||
| 36 | assert(opt_percpu_arena == per_phycpu_arena); | ||
| 37 | /* Hyper threads on the same physical CPU share an arena. */ | ||
| 38 | arena_ind = cpuid - ncpus / 2; | ||
| 39 | } | ||
| 40 | |||
| 41 | return arena_ind; | ||
| 42 | } | ||
| 43 | |||
| 44 | /* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */ | ||
| 45 | JEMALLOC_ALWAYS_INLINE unsigned | ||
| 46 | percpu_arena_ind_limit(percpu_arena_mode_t mode) { | ||
| 47 | assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode)); | ||
| 48 | if (mode == per_phycpu_arena && ncpus > 1) { | ||
| 49 | if (ncpus % 2) { | ||
| 50 | /* This likely means a misconfig. */ | ||
| 51 | return ncpus / 2 + 1; | ||
| 52 | } | ||
| 53 | return ncpus / 2; | ||
| 54 | } else { | ||
| 55 | return ncpus; | ||
| 56 | } | ||
| 57 | } | ||
| 58 | |||
| 59 | static inline arena_t * | ||
| 60 | arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) { | ||
| 61 | arena_t *ret; | ||
| 62 | |||
| 63 | assert(ind < MALLOCX_ARENA_LIMIT); | ||
| 64 | |||
| 65 | ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE); | ||
| 66 | if (unlikely(ret == NULL)) { | ||
| 67 | if (init_if_missing) { | ||
| 68 | ret = arena_init(tsdn, ind, &arena_config_default); | ||
| 69 | } | ||
| 70 | } | ||
| 71 | return ret; | ||
| 72 | } | ||
| 73 | |||
| 74 | JEMALLOC_ALWAYS_INLINE bool | ||
| 75 | tcache_available(tsd_t *tsd) { | ||
| 76 | /* | ||
| 77 | * The thread-specific auto tcache might be unavailable if: 1) tcache | ||
| 78 | * initialization is still in progress, or 2) it was disabled through the | ||
| 79 | * thread.tcache.enabled mallctl or config options. This check covers all cases. | ||
| 80 | */ | ||
| 81 | if (likely(tsd_tcache_enabled_get(tsd))) { | ||
| 82 | /* Associated arena == NULL implies tcache init in progress. */ | ||
| 83 | if (config_debug && tsd_tcache_slowp_get(tsd)->arena != NULL) { | ||
| 84 | tcache_assert_initialized(tsd_tcachep_get(tsd)); | ||
| 85 | } | ||
| 86 | return true; | ||
| 87 | } | ||
| 88 | |||
| 89 | return false; | ||
| 90 | } | ||
| 91 | |||
| 92 | JEMALLOC_ALWAYS_INLINE tcache_t * | ||
| 93 | tcache_get(tsd_t *tsd) { | ||
| 94 | if (!tcache_available(tsd)) { | ||
| 95 | return NULL; | ||
| 96 | } | ||
| 97 | |||
| 98 | return tsd_tcachep_get(tsd); | ||
| 99 | } | ||
| 100 | |||
| 101 | JEMALLOC_ALWAYS_INLINE tcache_slow_t * | ||
| 102 | tcache_slow_get(tsd_t *tsd) { | ||
| 103 | if (!tcache_available(tsd)) { | ||
| 104 | return NULL; | ||
| 105 | } | ||
| 106 | |||
| 107 | return tsd_tcache_slowp_get(tsd); | ||
| 108 | } | ||
| 109 | |||
| 110 | static inline void | ||
| 111 | pre_reentrancy(tsd_t *tsd, arena_t *arena) { | ||
| 112 | /* arena is the current context. Reentry from a0 is not allowed. */ | ||
| 113 | assert(arena != arena_get(tsd_tsdn(tsd), 0, false)); | ||
| 114 | tsd_pre_reentrancy_raw(tsd); | ||
| 115 | } | ||
| 116 | |||
| 117 | static inline void | ||
| 118 | post_reentrancy(tsd_t *tsd) { | ||
| 119 | tsd_post_reentrancy_raw(tsd); | ||
| 120 | } | ||
| 121 | |||
| 122 | #endif /* JEMALLOC_INTERNAL_INLINES_A_H */ | ||
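For readers skimming the deleted header above: percpu_arena_choose() maps a CPU id to an arena index, and in per_phycpu_arena mode it folds hyperthread siblings (assumed to occupy the upper half of the CPU id range) back onto their physical core's arena. A minimal standalone sketch of that index arithmetic, using plain C types and invented names rather than jemalloc's malloc_cpuid_t / percpu_arena_mode_t:

```c
#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for jemalloc's percpu arena modes. */
enum mode { PERCPU_ARENA, PER_PHYCPU_ARENA };

/* Mirror of the mapping in percpu_arena_choose(): one arena per CPU, or one
 * per physical core when hyperthread siblings occupy the upper half of the
 * CPU id range. */
static unsigned
choose_arena(enum mode m, unsigned cpuid, unsigned ncpus) {
	if (m == PERCPU_ARENA || cpuid < ncpus / 2) {
		return cpuid;
	}
	/* Hyperthread sibling: fold back onto its physical core's arena. */
	return cpuid - ncpus / 2;
}

int
main(void) {
	unsigned ncpus = 8; /* Assumed layout: 4 physical cores, 2 threads each. */
	assert(choose_arena(PERCPU_ARENA, 6, ncpus) == 6);
	assert(choose_arena(PER_PHYCPU_ARENA, 6, ncpus) == 2); /* sibling of CPU 2 */
	printf("cpu 6 -> arena %u (per-phycpu mode)\n",
	    choose_arena(PER_PHYCPU_ARENA, 6, ncpus));
	return 0;
}
```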
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h deleted file mode 100644 index 152f8a0..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h +++ /dev/null | |||
| @@ -1,103 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_INLINES_B_H | ||
| 2 | #define JEMALLOC_INTERNAL_INLINES_B_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/extent.h" | ||
| 5 | |||
| 6 | static inline void | ||
| 7 | percpu_arena_update(tsd_t *tsd, unsigned cpu) { | ||
| 8 | assert(have_percpu_arena); | ||
| 9 | arena_t *oldarena = tsd_arena_get(tsd); | ||
| 10 | assert(oldarena != NULL); | ||
| 11 | unsigned oldind = arena_ind_get(oldarena); | ||
| 12 | |||
| 13 | if (oldind != cpu) { | ||
| 14 | unsigned newind = cpu; | ||
| 15 | arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true); | ||
| 16 | assert(newarena != NULL); | ||
| 17 | |||
| 18 | /* Set new arena/tcache associations. */ | ||
| 19 | arena_migrate(tsd, oldarena, newarena); | ||
| 20 | tcache_t *tcache = tcache_get(tsd); | ||
| 21 | if (tcache != NULL) { | ||
| 22 | tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd); | ||
| 23 | tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow, | ||
| 24 | tcache, newarena); | ||
| 25 | } | ||
| 26 | } | ||
| 27 | } | ||
| 28 | |||
| 29 | |||
| 30 | /* Choose an arena based on a per-thread value. */ | ||
| 31 | static inline arena_t * | ||
| 32 | arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) { | ||
| 33 | arena_t *ret; | ||
| 34 | |||
| 35 | if (arena != NULL) { | ||
| 36 | return arena; | ||
| 37 | } | ||
| 38 | |||
| 39 | /* During reentrancy, arena 0 is the safest bet. */ | ||
| 40 | if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) { | ||
| 41 | return arena_get(tsd_tsdn(tsd), 0, true); | ||
| 42 | } | ||
| 43 | |||
| 44 | ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd); | ||
| 45 | if (unlikely(ret == NULL)) { | ||
| 46 | ret = arena_choose_hard(tsd, internal); | ||
| 47 | assert(ret); | ||
| 48 | if (tcache_available(tsd)) { | ||
| 49 | tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd); | ||
| 50 | tcache_t *tcache = tsd_tcachep_get(tsd); | ||
| 51 | if (tcache_slow->arena != NULL) { | ||
| 52 | /* See comments in tsd_tcache_data_init().*/ | ||
| 53 | assert(tcache_slow->arena == | ||
| 54 | arena_get(tsd_tsdn(tsd), 0, false)); | ||
| 55 | if (tcache_slow->arena != ret) { | ||
| 56 | tcache_arena_reassociate(tsd_tsdn(tsd), | ||
| 57 | tcache_slow, tcache, ret); | ||
| 58 | } | ||
| 59 | } else { | ||
| 60 | tcache_arena_associate(tsd_tsdn(tsd), | ||
| 61 | tcache_slow, tcache, ret); | ||
| 62 | } | ||
| 63 | } | ||
| 64 | } | ||
| 65 | |||
| 66 | /* | ||
| 67 | * Note that for percpu arena, if the current arena is outside of the | ||
| 68 | * auto percpu arena range (i.e. the thread is assigned to a manually | ||
| 69 | * managed arena), then percpu arena is skipped. | ||
| 70 | */ | ||
| 71 | if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) && | ||
| 72 | !internal && (arena_ind_get(ret) < | ||
| 73 | percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd != | ||
| 74 | tsd_tsdn(tsd))) { | ||
| 75 | unsigned ind = percpu_arena_choose(); | ||
| 76 | if (arena_ind_get(ret) != ind) { | ||
| 77 | percpu_arena_update(tsd, ind); | ||
| 78 | ret = tsd_arena_get(tsd); | ||
| 79 | } | ||
| 80 | ret->last_thd = tsd_tsdn(tsd); | ||
| 81 | } | ||
| 82 | |||
| 83 | return ret; | ||
| 84 | } | ||
| 85 | |||
| 86 | static inline arena_t * | ||
| 87 | arena_choose(tsd_t *tsd, arena_t *arena) { | ||
| 88 | return arena_choose_impl(tsd, arena, false); | ||
| 89 | } | ||
| 90 | |||
| 91 | static inline arena_t * | ||
| 92 | arena_ichoose(tsd_t *tsd, arena_t *arena) { | ||
| 93 | return arena_choose_impl(tsd, arena, true); | ||
| 94 | } | ||
| 95 | |||
| 96 | static inline bool | ||
| 97 | arena_is_auto(arena_t *arena) { | ||
| 98 | assert(narenas_auto > 0); | ||
| 99 | |||
| 100 | return (arena_ind_get(arena) < manual_arena_base); | ||
| 101 | } | ||
| 102 | |||
| 103 | #endif /* JEMALLOC_INTERNAL_INLINES_B_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h deleted file mode 100644 index 620d097..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h +++ /dev/null | |||
| @@ -1,393 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_INLINES_C_H | ||
| 2 | #define JEMALLOC_INTERNAL_INLINES_C_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/hook.h" | ||
| 5 | #include "jemalloc/internal/jemalloc_internal_types.h" | ||
| 6 | #include "jemalloc/internal/log.h" | ||
| 7 | #include "jemalloc/internal/sz.h" | ||
| 8 | #include "jemalloc/internal/thread_event.h" | ||
| 9 | #include "jemalloc/internal/witness.h" | ||
| 10 | |||
| 11 | /* | ||
| 12 | * Translating the names of the 'i' functions: | ||
| 13 | * Abbreviations used in the first part of the function name (before | ||
| 14 | * alloc/dalloc) describe what that function accomplishes: | ||
| 15 | * a: arena (query) | ||
| 16 | * s: size (query, or sized deallocation) | ||
| 17 | * e: extent (query) | ||
| 18 | * p: aligned (allocates) | ||
| 19 | * vs: size (query, without knowing that the pointer is into the heap) | ||
| 20 | * r: rallocx implementation | ||
| 21 | * x: xallocx implementation | ||
| 22 | * Abbreviations used in the second part of the function name (after | ||
| 23 | * alloc/dalloc) describe the arguments it takes | ||
| 24 | * z: whether to return zeroed memory | ||
| 25 | * t: accepts a tcache_t * parameter | ||
| 26 | * m: accepts an arena_t * parameter | ||
| 27 | */ | ||
| 28 | |||
| 29 | JEMALLOC_ALWAYS_INLINE arena_t * | ||
| 30 | iaalloc(tsdn_t *tsdn, const void *ptr) { | ||
| 31 | assert(ptr != NULL); | ||
| 32 | |||
| 33 | return arena_aalloc(tsdn, ptr); | ||
| 34 | } | ||
| 35 | |||
| 36 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 37 | isalloc(tsdn_t *tsdn, const void *ptr) { | ||
| 38 | assert(ptr != NULL); | ||
| 39 | |||
| 40 | return arena_salloc(tsdn, ptr); | ||
| 41 | } | ||
| 42 | |||
| 43 | JEMALLOC_ALWAYS_INLINE void * | ||
| 44 | iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, | ||
| 45 | bool is_internal, arena_t *arena, bool slow_path) { | ||
| 46 | void *ret; | ||
| 47 | |||
| 48 | assert(!is_internal || tcache == NULL); | ||
| 49 | assert(!is_internal || arena == NULL || arena_is_auto(arena)); | ||
| 50 | if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) { | ||
| 51 | witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), | ||
| 52 | WITNESS_RANK_CORE, 0); | ||
| 53 | } | ||
| 54 | |||
| 55 | ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); | ||
| 56 | if (config_stats && is_internal && likely(ret != NULL)) { | ||
| 57 | arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret)); | ||
| 58 | } | ||
| 59 | return ret; | ||
| 60 | } | ||
| 61 | |||
| 62 | JEMALLOC_ALWAYS_INLINE void * | ||
| 63 | ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) { | ||
| 64 | return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false, | ||
| 65 | NULL, slow_path); | ||
| 66 | } | ||
| 67 | |||
| 68 | JEMALLOC_ALWAYS_INLINE void * | ||
| 69 | ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, | ||
| 70 | tcache_t *tcache, bool is_internal, arena_t *arena) { | ||
| 71 | void *ret; | ||
| 72 | |||
| 73 | assert(usize != 0); | ||
| 74 | assert(usize == sz_sa2u(usize, alignment)); | ||
| 75 | assert(!is_internal || tcache == NULL); | ||
| 76 | assert(!is_internal || arena == NULL || arena_is_auto(arena)); | ||
| 77 | witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), | ||
| 78 | WITNESS_RANK_CORE, 0); | ||
| 79 | |||
| 80 | ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); | ||
| 81 | assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); | ||
| 82 | if (config_stats && is_internal && likely(ret != NULL)) { | ||
| 83 | arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret)); | ||
| 84 | } | ||
| 85 | return ret; | ||
| 86 | } | ||
| 87 | |||
| 88 | JEMALLOC_ALWAYS_INLINE void * | ||
| 89 | ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, | ||
| 90 | tcache_t *tcache, arena_t *arena) { | ||
| 91 | return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena); | ||
| 92 | } | ||
| 93 | |||
| 94 | JEMALLOC_ALWAYS_INLINE void * | ||
| 95 | ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) { | ||
| 96 | return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero, | ||
| 97 | tcache_get(tsd), false, NULL); | ||
| 98 | } | ||
| 99 | |||
| 100 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 101 | ivsalloc(tsdn_t *tsdn, const void *ptr) { | ||
| 102 | return arena_vsalloc(tsdn, ptr); | ||
| 103 | } | ||
| 104 | |||
| 105 | JEMALLOC_ALWAYS_INLINE void | ||
| 106 | idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, | ||
| 107 | emap_alloc_ctx_t *alloc_ctx, bool is_internal, bool slow_path) { | ||
| 108 | assert(ptr != NULL); | ||
| 109 | assert(!is_internal || tcache == NULL); | ||
| 110 | assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr))); | ||
| 111 | witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), | ||
| 112 | WITNESS_RANK_CORE, 0); | ||
| 113 | if (config_stats && is_internal) { | ||
| 114 | arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr)); | ||
| 115 | } | ||
| 116 | if (!is_internal && !tsdn_null(tsdn) && | ||
| 117 | tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) { | ||
| 118 | assert(tcache == NULL); | ||
| 119 | } | ||
| 120 | arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path); | ||
| 121 | } | ||
| 122 | |||
| 123 | JEMALLOC_ALWAYS_INLINE void | ||
| 124 | idalloc(tsd_t *tsd, void *ptr) { | ||
| 125 | idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true); | ||
| 126 | } | ||
| 127 | |||
| 128 | JEMALLOC_ALWAYS_INLINE void | ||
| 129 | isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, | ||
| 130 | emap_alloc_ctx_t *alloc_ctx, bool slow_path) { | ||
| 131 | witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), | ||
| 132 | WITNESS_RANK_CORE, 0); | ||
| 133 | arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path); | ||
| 134 | } | ||
| 135 | |||
| 136 | JEMALLOC_ALWAYS_INLINE void * | ||
| 137 | iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, | ||
| 138 | size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, | ||
| 139 | hook_ralloc_args_t *hook_args) { | ||
| 140 | witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), | ||
| 141 | WITNESS_RANK_CORE, 0); | ||
| 142 | void *p; | ||
| 143 | size_t usize, copysize; | ||
| 144 | |||
| 145 | usize = sz_sa2u(size, alignment); | ||
| 146 | if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { | ||
| 147 | return NULL; | ||
| 148 | } | ||
| 149 | p = ipalloct(tsdn, usize, alignment, zero, tcache, arena); | ||
| 150 | if (p == NULL) { | ||
| 151 | return NULL; | ||
| 152 | } | ||
| 153 | /* | ||
| 154 | * Copy at most size bytes (not size+extra), since the caller has no | ||
| 155 | * expectation that the extra bytes will be reliably preserved. | ||
| 156 | */ | ||
| 157 | copysize = (size < oldsize) ? size : oldsize; | ||
| 158 | memcpy(p, ptr, copysize); | ||
| 159 | hook_invoke_alloc(hook_args->is_realloc | ||
| 160 | ? hook_alloc_realloc : hook_alloc_rallocx, p, (uintptr_t)p, | ||
| 161 | hook_args->args); | ||
| 162 | hook_invoke_dalloc(hook_args->is_realloc | ||
| 163 | ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args); | ||
| 164 | isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); | ||
| 165 | return p; | ||
| 166 | } | ||
| 167 | |||
| 168 | /* | ||
| 169 | * is_realloc threads through the knowledge of whether or not this call comes | ||
| 170 | * from je_realloc (as opposed to je_rallocx); this ensures that we pass the | ||
| 171 | * correct entry point into any hooks. | ||
| 172 | * Note that these functions are all force-inlined, so no actual bool gets | ||
| 173 | * passed-around anywhere. | ||
| 174 | */ | ||
| 175 | JEMALLOC_ALWAYS_INLINE void * | ||
| 176 | iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment, | ||
| 177 | bool zero, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args) | ||
| 178 | { | ||
| 179 | assert(ptr != NULL); | ||
| 180 | assert(size != 0); | ||
| 181 | witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), | ||
| 182 | WITNESS_RANK_CORE, 0); | ||
| 183 | |||
| 184 | if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) | ||
| 185 | != 0) { | ||
| 186 | /* | ||
| 187 | * Existing object alignment is inadequate; allocate new space | ||
| 188 | * and copy. | ||
| 189 | */ | ||
| 190 | return iralloct_realign(tsdn, ptr, oldsize, size, alignment, | ||
| 191 | zero, tcache, arena, hook_args); | ||
| 192 | } | ||
| 193 | |||
| 194 | return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero, | ||
| 195 | tcache, hook_args); | ||
| 196 | } | ||
| 197 | |||
| 198 | JEMALLOC_ALWAYS_INLINE void * | ||
| 199 | iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, | ||
| 200 | bool zero, hook_ralloc_args_t *hook_args) { | ||
| 201 | return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero, | ||
| 202 | tcache_get(tsd), NULL, hook_args); | ||
| 203 | } | ||
| 204 | |||
| 205 | JEMALLOC_ALWAYS_INLINE bool | ||
| 206 | ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, | ||
| 207 | size_t alignment, bool zero, size_t *newsize) { | ||
| 208 | assert(ptr != NULL); | ||
| 209 | assert(size != 0); | ||
| 210 | witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), | ||
| 211 | WITNESS_RANK_CORE, 0); | ||
| 212 | |||
| 213 | if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) | ||
| 214 | != 0) { | ||
| 215 | /* Existing object alignment is inadequate. */ | ||
| 216 | *newsize = oldsize; | ||
| 217 | return true; | ||
| 218 | } | ||
| 219 | |||
| 220 | return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero, | ||
| 221 | newsize); | ||
| 222 | } | ||
| 223 | |||
| 224 | JEMALLOC_ALWAYS_INLINE void | ||
| 225 | fastpath_success_finish(tsd_t *tsd, uint64_t allocated_after, | ||
| 226 | cache_bin_t *bin, void *ret) { | ||
| 227 | thread_allocated_set(tsd, allocated_after); | ||
| 228 | if (config_stats) { | ||
| 229 | bin->tstats.nrequests++; | ||
| 230 | } | ||
| 231 | |||
| 232 | LOG("core.malloc.exit", "result: %p", ret); | ||
| 233 | } | ||
| 234 | |||
| 235 | JEMALLOC_ALWAYS_INLINE bool | ||
| 236 | malloc_initialized(void) { | ||
| 237 | return (malloc_init_state == malloc_init_initialized); | ||
| 238 | } | ||
| 239 | |||
| 240 | /* | ||
| 241 | * malloc() fastpath. Included here so that we can inline it into operator new; | ||
| 242 | * function call overhead there is non-negligible as a fraction of total CPU in | ||
| 243 | * allocation-heavy C++ programs. We take the fallback alloc to allow malloc | ||
| 244 | * (which can return NULL) to differ in its behavior from operator new (which | ||
| 245 | * can't). It matches the signature of malloc / operator new so that we can | ||
| 246 | * tail-call the fallback allocator, allowing us to avoid setting up the call | ||
| 247 | * frame in the common case. | ||
| 248 | * | ||
| 249 | * Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit | ||
| 250 | * tcache. If either of these is false, we tail-call to the slowpath, | ||
| 251 | * malloc_default(). Tail-calling is used to avoid any caller-saved | ||
| 252 | * registers. | ||
| 253 | * | ||
| 254 | * fastpath supports ticker and profiling, both of which will also | ||
| 255 | * tail-call to the slowpath if they fire. | ||
| 256 | */ | ||
| 257 | JEMALLOC_ALWAYS_INLINE void * | ||
| 258 | imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t, size_t *), size_t *usable_size) { | ||
| 259 | LOG("core.malloc.entry", "size: %zu", size); | ||
| 260 | if (tsd_get_allocates() && unlikely(!malloc_initialized())) { | ||
| 261 | return fallback_alloc(size, usable_size); | ||
| 262 | } | ||
| 263 | |||
| 264 | tsd_t *tsd = tsd_get(false); | ||
| 265 | if (unlikely((size > SC_LOOKUP_MAXCLASS) || tsd == NULL)) { | ||
| 266 | return fallback_alloc(size, usable_size); | ||
| 267 | } | ||
| 268 | /* | ||
| 269 | * The code below, up to the branch checking the next_event threshold, may | ||
| 270 | * execute before malloc_init(), in which case the threshold is 0 to | ||
| 271 | * trigger the slow path and initialization. | ||
| 272 | * | ||
| 273 | * Note that when uninitialized, only the fast-path variants of the sz / | ||
| 274 | * tsd facilities may be called. | ||
| 275 | */ | ||
| 276 | szind_t ind; | ||
| 277 | /* | ||
| 278 | * The thread_allocated counter in tsd serves as a general purpose | ||
| 279 | * accumulator for bytes of allocation to trigger different types of | ||
| 280 | * events. usize is always needed to advance thread_allocated, though | ||
| 281 | * it's not always needed in the core allocation logic. | ||
| 282 | */ | ||
| 283 | size_t usize; | ||
| 284 | sz_size2index_usize_fastpath(size, &ind, &usize); | ||
| 285 | /* Fast path relies on size being a bin. */ | ||
| 286 | assert(ind < SC_NBINS); | ||
| 287 | assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS) && | ||
| 288 | (size <= SC_SMALL_MAXCLASS)); | ||
| 289 | |||
| 290 | uint64_t allocated, threshold; | ||
| 291 | te_malloc_fastpath_ctx(tsd, &allocated, &threshold); | ||
| 292 | uint64_t allocated_after = allocated + usize; | ||
| 293 | /* | ||
| 294 | * The ind and usize might be uninitialized (or partially initialized) before | ||
| 295 | * malloc_init(). The assertions check for: 1) full correctness (usize | ||
| 296 | * & ind) when initialized; and 2) guaranteed slow-path (threshold == 0) | ||
| 297 | * when !initialized. | ||
| 298 | */ | ||
| 299 | if (!malloc_initialized()) { | ||
| 300 | assert(threshold == 0); | ||
| 301 | } else { | ||
| 302 | assert(ind == sz_size2index(size)); | ||
| 303 | assert(usize > 0 && usize == sz_index2size(ind)); | ||
| 304 | } | ||
| 305 | /* | ||
| 306 | * Check for events and tsd non-nominal (fast_threshold will be set to | ||
| 307 | * 0) in a single branch. | ||
| 308 | */ | ||
| 309 | if (unlikely(allocated_after >= threshold)) { | ||
| 310 | return fallback_alloc(size, usable_size); | ||
| 311 | } | ||
| 312 | assert(tsd_fast(tsd)); | ||
| 313 | |||
| 314 | tcache_t *tcache = tsd_tcachep_get(tsd); | ||
| 315 | assert(tcache == tcache_get(tsd)); | ||
| 316 | cache_bin_t *bin = &tcache->bins[ind]; | ||
| 317 | bool tcache_success; | ||
| 318 | void *ret; | ||
| 319 | |||
| 320 | /* | ||
| 321 | * We split up the code this way so that redundant low-water | ||
| 322 | * computation doesn't happen in the (more common) case in which we | ||
| 323 | * don't touch the low water mark. The compiler won't do this | ||
| 324 | * duplication on its own. | ||
| 325 | */ | ||
| 326 | ret = cache_bin_alloc_easy(bin, &tcache_success); | ||
| 327 | if (tcache_success) { | ||
| 328 | fastpath_success_finish(tsd, allocated_after, bin, ret); | ||
| 329 | if (usable_size) *usable_size = usize; | ||
| 330 | return ret; | ||
| 331 | } | ||
| 332 | ret = cache_bin_alloc(bin, &tcache_success); | ||
| 333 | if (tcache_success) { | ||
| 334 | fastpath_success_finish(tsd, allocated_after, bin, ret); | ||
| 335 | if (usable_size) *usable_size = usize; | ||
| 336 | return ret; | ||
| 337 | } | ||
| 338 | |||
| 339 | return fallback_alloc(size, usable_size); | ||
| 340 | } | ||
| 341 | |||
| 342 | JEMALLOC_ALWAYS_INLINE int | ||
| 343 | iget_defrag_hint(tsdn_t *tsdn, void* ptr) { | ||
| 344 | int defrag = 0; | ||
| 345 | emap_alloc_ctx_t alloc_ctx; | ||
| 346 | emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx); | ||
| 347 | if (likely(alloc_ctx.slab)) { | ||
| 348 | /* Small allocation. */ | ||
| 349 | edata_t *slab = emap_edata_lookup(tsdn, &arena_emap_global, ptr); | ||
| 350 | arena_t *arena = arena_get_from_edata(slab); | ||
| 351 | szind_t binind = edata_szind_get(slab); | ||
| 352 | unsigned binshard = edata_binshard_get(slab); | ||
| 353 | bin_t *bin = arena_get_bin(arena, binind, binshard); | ||
| 354 | malloc_mutex_lock(tsdn, &bin->lock); | ||
| 355 | arena_dalloc_bin_locked_info_t info; | ||
| 356 | arena_dalloc_bin_locked_begin(&info, binind); | ||
| 357 | /* Don't bother moving allocations from the slab currently used for new allocations */ | ||
| 358 | if (slab != bin->slabcur) { | ||
| 359 | int free_in_slab = edata_nfree_get(slab); | ||
| 360 | if (free_in_slab) { | ||
| 361 | const bin_info_t *bin_info = &bin_infos[binind]; | ||
| 362 | /* Find the number of non-full slabs and the number of regs in them. */ | ||
| 363 | unsigned long curslabs = 0; | ||
| 364 | size_t curregs = 0; | ||
| 365 | /* Run on all bin shards (usually just one) */ | ||
| 366 | for (uint32_t i = 0; i < bin_info->n_shards; i++) { | ||
| 367 | bin_t *bb = arena_get_bin(arena, binind, i); | ||
| 368 | curslabs += bb->stats.nonfull_slabs; | ||
| 369 | /* Deduct the regs in full slabs (they're not part of the game) */ | ||
| 370 | unsigned long full_slabs = bb->stats.curslabs - bb->stats.nonfull_slabs; | ||
| 371 | curregs += bb->stats.curregs - full_slabs * bin_info->nregs; | ||
| 372 | if (bb->slabcur) { | ||
| 373 | /* Remove slabcur from the overall utilization (not a candidate to move from). */ | ||
| 374 | curregs -= bin_info->nregs - edata_nfree_get(bb->slabcur); | ||
| 375 | curslabs -= 1; | ||
| 376 | } | ||
| 377 | } | ||
| 378 | /* Compare the utilization ratio of the slab in question to the total average | ||
| 379 | * among non-full slabs. To avoid precision loss in division, we do that by | ||
| 380 | * extrapolating the usage of the slab as if all slabs have the same usage. | ||
| 381 | * If this slab is less used than the average, we'll prefer to move the data | ||
| 382 | * to hopefully more used ones. To avoid stagnation when all slabs have the same | ||
| 383 | * utilization, we give an additional 12.5% weight to the decision to defrag. */ | ||
| 384 | defrag = (bin_info->nregs - free_in_slab) * curslabs <= curregs + curregs / 8; | ||
| 385 | } | ||
| 386 | } | ||
| 387 | arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info); | ||
| 388 | malloc_mutex_unlock(tsdn, &bin->lock); | ||
| 389 | } | ||
| 390 | return defrag; | ||
| 391 | } | ||
| 392 | |||
| 393 | #endif /* JEMALLOC_INTERNAL_INLINES_C_H */ | ||
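The defrag decision at the end of iget_defrag_hint() above compares the target slab's utilization with the average across non-full slabs, kept in integer arithmetic to avoid division: defragment when (nregs - free_in_slab) * curslabs <= curregs + curregs / 8. A small worked example of that comparison with made-up slab statistics (none of these numbers come from a real bin):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Same comparison iget_defrag_hint() performs: defragment when the slab
 * holding the pointer is used less than (roughly) the average non-full slab,
 * with a 12.5% bias toward moving. */
static bool
should_defrag(unsigned nregs, unsigned free_in_slab, unsigned long curslabs,
    size_t curregs) {
	return (size_t)(nregs - free_in_slab) * curslabs <= curregs + curregs / 8;
}

int
main(void) {
	/* Hypothetical bin: 64 regions per slab, 10 non-full slabs holding 400
	 * live regions in total (average 40 live regions per slab). */
	unsigned nregs = 64;
	unsigned long curslabs = 10;
	size_t curregs = 400;

	/* Slab with 30 free regions -> 34 live, below the biased average: move. */
	printf("34 live: defrag=%d\n", should_defrag(nregs, 30, curslabs, curregs));
	/* Slab with 10 free regions -> 54 live, well above the average: keep. */
	printf("54 live: defrag=%d\n", should_defrag(nregs, 10, curslabs, curregs));
	return 0;
}
```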
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h deleted file mode 100644 index e97b5f9..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h +++ /dev/null | |||
| @@ -1,111 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_MACROS_H | ||
| 2 | #define JEMALLOC_INTERNAL_MACROS_H | ||
| 3 | |||
| 4 | #ifdef JEMALLOC_DEBUG | ||
| 5 | # define JEMALLOC_ALWAYS_INLINE static inline | ||
| 6 | #else | ||
| 7 | # ifdef _MSC_VER | ||
| 8 | # define JEMALLOC_ALWAYS_INLINE static __forceinline | ||
| 9 | # else | ||
| 10 | # define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline | ||
| 11 | # endif | ||
| 12 | #endif | ||
| 13 | #ifdef _MSC_VER | ||
| 14 | # define inline _inline | ||
| 15 | #endif | ||
| 16 | |||
| 17 | #define UNUSED JEMALLOC_ATTR(unused) | ||
| 18 | |||
| 19 | #define ZU(z) ((size_t)z) | ||
| 20 | #define ZD(z) ((ssize_t)z) | ||
| 21 | #define QU(q) ((uint64_t)q) | ||
| 22 | #define QD(q) ((int64_t)q) | ||
| 23 | |||
| 24 | #define KZU(z) ZU(z##ULL) | ||
| 25 | #define KZD(z) ZD(z##LL) | ||
| 26 | #define KQU(q) QU(q##ULL) | ||
| 27 | #define KQD(q) QD(q##LL) | ||
| 28 | |||
| 29 | #ifndef __DECONST | ||
| 30 | # define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) | ||
| 31 | #endif | ||
| 32 | |||
| 33 | #if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus) | ||
| 34 | # define restrict | ||
| 35 | #endif | ||
| 36 | |||
| 37 | /* Various function pointers are static and immutable except during testing. */ | ||
| 38 | #ifdef JEMALLOC_JET | ||
| 39 | # define JET_MUTABLE | ||
| 40 | #else | ||
| 41 | # define JET_MUTABLE const | ||
| 42 | #endif | ||
| 43 | |||
| 44 | #define JEMALLOC_VA_ARGS_HEAD(head, ...) head | ||
| 45 | #define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__ | ||
| 46 | |||
| 47 | /* Diagnostic suppression macros */ | ||
| 48 | #if defined(_MSC_VER) && !defined(__clang__) | ||
| 49 | # define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push)) | ||
| 50 | # define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop)) | ||
| 51 | # define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W)) | ||
| 52 | # define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS | ||
| 53 | # define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS | ||
| 54 | # define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN | ||
| 55 | # define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS | ||
| 56 | /* #pragma GCC diagnostic first appeared in gcc 4.6. */ | ||
| 57 | #elif (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && \ | ||
| 58 | (__GNUC_MINOR__ > 5)))) || defined(__clang__) | ||
| 59 | /* | ||
| 60 | * The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang | ||
| 61 | * diagnostic suppression macros and should not be used anywhere else. | ||
| 62 | */ | ||
| 63 | # define JEMALLOC_PRAGMA__(X) _Pragma(#X) | ||
| 64 | # define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push) | ||
| 65 | # define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop) | ||
| 66 | # define JEMALLOC_DIAGNOSTIC_IGNORE(W) \ | ||
| 67 | JEMALLOC_PRAGMA__(GCC diagnostic ignored W) | ||
| 68 | |||
| 69 | /* | ||
| 70 | * The -Wmissing-field-initializers warning is buggy in GCC versions < 5.1 and | ||
| 71 | * all clang versions up to version 7 (currently trunk, unreleased). This macro | ||
| 72 | * suppresses the warning for the affected compiler versions only. | ||
| 73 | */ | ||
| 74 | # if ((defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 5)) || \ | ||
| 75 | defined(__clang__) | ||
| 76 | # define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \ | ||
| 77 | JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers") | ||
| 78 | # else | ||
| 79 | # define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS | ||
| 80 | # endif | ||
| 81 | |||
| 82 | # define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \ | ||
| 83 | JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits") | ||
| 84 | # define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \ | ||
| 85 | JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter") | ||
| 86 | # if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7) | ||
| 87 | # define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \ | ||
| 88 | JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=") | ||
| 89 | # else | ||
| 90 | # define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN | ||
| 91 | # endif | ||
| 92 | # define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \ | ||
| 93 | JEMALLOC_DIAGNOSTIC_PUSH \ | ||
| 94 | JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER | ||
| 95 | #else | ||
| 96 | # define JEMALLOC_DIAGNOSTIC_PUSH | ||
| 97 | # define JEMALLOC_DIAGNOSTIC_POP | ||
| 98 | # define JEMALLOC_DIAGNOSTIC_IGNORE(W) | ||
| 99 | # define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS | ||
| 100 | # define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS | ||
| 101 | # define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN | ||
| 102 | # define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS | ||
| 103 | #endif | ||
| 104 | |||
| 105 | /* | ||
| 106 | * Disables spurious diagnostics for all headers. Since these headers are not | ||
| 107 | * included by users directly, it does not affect their diagnostic settings. | ||
| 108 | */ | ||
| 109 | JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS | ||
| 110 | |||
| 111 | #endif /* JEMALLOC_INTERNAL_MACROS_H */ | ||
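As a usage note for the diagnostic-suppression macros above: they are meant to bracket a small region of code that would otherwise trip a known-noisy warning. A hypothetical snippet (it assumes the macros from the header above are in scope; the struct and initializer are invented for illustration):

```c
/* Hypothetical example, not taken from jemalloc: suppress the (sometimes
 * spurious) missing-field-initializers warning around one designated
 * initializer whose trailing fields are deliberately zero-initialized. */
typedef struct { int a; int b; int c; } triple_t;

JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
static const triple_t default_triple = {1}; /* b and c intentionally zero. */
JEMALLOC_DIAGNOSTIC_POP
```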
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h deleted file mode 100644 index 62c2b59..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h +++ /dev/null | |||
| @@ -1,130 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_TYPES_H | ||
| 2 | #define JEMALLOC_INTERNAL_TYPES_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/quantum.h" | ||
| 5 | |||
| 6 | /* Processor / core id type. */ | ||
| 7 | typedef int malloc_cpuid_t; | ||
| 8 | |||
| 9 | /* When realloc(non-null-ptr, 0) is called, what happens? */ | ||
| 10 | enum zero_realloc_action_e { | ||
| 11 | /* Realloc(ptr, 0) is free(ptr); return malloc(0); */ | ||
| 12 | zero_realloc_action_alloc = 0, | ||
| 13 | /* Realloc(ptr, 0) is free(ptr); */ | ||
| 14 | zero_realloc_action_free = 1, | ||
| 15 | /* Realloc(ptr, 0) aborts. */ | ||
| 16 | zero_realloc_action_abort = 2 | ||
| 17 | }; | ||
| 18 | typedef enum zero_realloc_action_e zero_realloc_action_t; | ||
| 19 | |||
| 20 | /* Signature of write callback. */ | ||
| 21 | typedef void (write_cb_t)(void *, const char *); | ||
| 22 | |||
| 23 | enum malloc_init_e { | ||
| 24 | malloc_init_uninitialized = 3, | ||
| 25 | malloc_init_a0_initialized = 2, | ||
| 26 | malloc_init_recursible = 1, | ||
| 27 | malloc_init_initialized = 0 /* Common case --> jnz. */ | ||
| 28 | }; | ||
| 29 | typedef enum malloc_init_e malloc_init_t; | ||
| 30 | |||
| 31 | /* | ||
| 32 | * Flags bits: | ||
| 33 | * | ||
| 34 | * a: arena | ||
| 35 | * t: tcache | ||
| 36 | * 0: unused | ||
| 37 | * z: zero | ||
| 38 | * n: alignment | ||
| 39 | * | ||
| 40 | * aaaaaaaa aaaatttt tttttttt 0znnnnnn | ||
| 41 | */ | ||
| 42 | #define MALLOCX_ARENA_BITS 12 | ||
| 43 | #define MALLOCX_TCACHE_BITS 12 | ||
| 44 | #define MALLOCX_LG_ALIGN_BITS 6 | ||
| 45 | #define MALLOCX_ARENA_SHIFT 20 | ||
| 46 | #define MALLOCX_TCACHE_SHIFT 8 | ||
| 47 | #define MALLOCX_ARENA_MASK \ | ||
| 48 | (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT) | ||
| 49 | /* NB: Arena index bias decreases the maximum number of arenas by 1. */ | ||
| 50 | #define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1) | ||
| 51 | #define MALLOCX_TCACHE_MASK \ | ||
| 52 | (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT) | ||
| 53 | #define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3) | ||
| 54 | #define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1) | ||
| 55 | /* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */ | ||
| 56 | #define MALLOCX_ALIGN_GET_SPECIFIED(flags) \ | ||
| 57 | (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)) | ||
| 58 | #define MALLOCX_ALIGN_GET(flags) \ | ||
| 59 | (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1)) | ||
| 60 | #define MALLOCX_ZERO_GET(flags) \ | ||
| 61 | ((bool)(flags & MALLOCX_ZERO)) | ||
| 62 | |||
| 63 | #define MALLOCX_TCACHE_GET(flags) \ | ||
| 64 | (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2) | ||
| 65 | #define MALLOCX_ARENA_GET(flags) \ | ||
| 66 | (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1) | ||
| 67 | |||
| 68 | /* Smallest size class to support. */ | ||
| 69 | #define TINY_MIN (1U << LG_TINY_MIN) | ||
| 70 | |||
| 71 | #define LONG ((size_t)(1U << LG_SIZEOF_LONG)) | ||
| 72 | #define LONG_MASK (LONG - 1) | ||
| 73 | |||
| 74 | /* Return the smallest long multiple that is >= a. */ | ||
| 75 | #define LONG_CEILING(a) \ | ||
| 76 | (((a) + LONG_MASK) & ~LONG_MASK) | ||
| 77 | |||
| 78 | #define SIZEOF_PTR (1U << LG_SIZEOF_PTR) | ||
| 79 | #define PTR_MASK (SIZEOF_PTR - 1) | ||
| 80 | |||
| 81 | /* Return the smallest (void *) multiple that is >= a. */ | ||
| 82 | #define PTR_CEILING(a) \ | ||
| 83 | (((a) + PTR_MASK) & ~PTR_MASK) | ||
| 84 | |||
| 85 | /* | ||
| 86 | * Maximum size of L1 cache line. This is used to avoid cache line aliasing. | ||
| 87 | * In addition, this controls the spacing of cacheline-spaced size classes. | ||
| 88 | * | ||
| 89 | * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can | ||
| 90 | * only handle raw constants. | ||
| 91 | */ | ||
| 92 | #define LG_CACHELINE 6 | ||
| 93 | #define CACHELINE 64 | ||
| 94 | #define CACHELINE_MASK (CACHELINE - 1) | ||
| 95 | |||
| 96 | /* Return the smallest cacheline multiple that is >= s. */ | ||
| 97 | #define CACHELINE_CEILING(s) \ | ||
| 98 | (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) | ||
| 99 | |||
| 100 | /* Return the nearest aligned address at or below a. */ | ||
| 101 | #define ALIGNMENT_ADDR2BASE(a, alignment) \ | ||
| 102 | ((void *)((uintptr_t)(a) & ((~(alignment)) + 1))) | ||
| 103 | |||
| 104 | /* Return the offset between a and the nearest aligned address at or below a. */ | ||
| 105 | #define ALIGNMENT_ADDR2OFFSET(a, alignment) \ | ||
| 106 | ((size_t)((uintptr_t)(a) & (alignment - 1))) | ||
| 107 | |||
| 108 | /* Return the smallest alignment multiple that is >= s. */ | ||
| 109 | #define ALIGNMENT_CEILING(s, alignment) \ | ||
| 110 | (((s) + (alignment - 1)) & ((~(alignment)) + 1)) | ||
| 111 | |||
| 112 | /* Declare a variable-length array. */ | ||
| 113 | #if __STDC_VERSION__ < 199901L | ||
| 114 | # ifdef _MSC_VER | ||
| 115 | # include <malloc.h> | ||
| 116 | # define alloca _alloca | ||
| 117 | # else | ||
| 118 | # ifdef JEMALLOC_HAS_ALLOCA_H | ||
| 119 | # include <alloca.h> | ||
| 120 | # else | ||
| 121 | # include <stdlib.h> | ||
| 122 | # endif | ||
| 123 | # endif | ||
| 124 | # define VARIABLE_ARRAY(type, name, count) \ | ||
| 125 | type *name = alloca(sizeof(type) * (count)) | ||
| 126 | #else | ||
| 127 | # define VARIABLE_ARRAY(type, name, count) type name[(count)] | ||
| 128 | #endif | ||
| 129 | |||
| 130 | #endif /* JEMALLOC_INTERNAL_TYPES_H */ | ||
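The MALLOCX flag layout documented above packs the arena index (biased by +1), the tcache id (biased by +2), a zero bit, and lg(alignment) into one flags word. A standalone sketch that mirrors the encode/decode arithmetic with the shift and mask values copied from the header (the helper names are invented for illustration; they are not jemalloc's public MALLOCX_* macros):

```c
#include <assert.h>
#include <stdio.h>

/* Mirror of the layout: aaaaaaaa aaaatttt tttttttt 0znnnnnn
 * 12 arena bits (biased +1), 12 tcache bits (biased +2), zero bit,
 * 6 bits of lg(alignment). */
#define ARENA_SHIFT   20
#define LG_ALIGN_MASK ((1 << 6) - 1)

static int
encode_flags(unsigned arena_ind, unsigned lg_align) {
	return (int)(((arena_ind + 1) << ARENA_SHIFT) | (lg_align & LG_ALIGN_MASK));
}

static unsigned
arena_get(int flags) {
	return ((unsigned)flags >> ARENA_SHIFT) - 1; /* Undo the +1 bias. */
}

static unsigned
lg_align_get(int flags) {
	return (unsigned)flags & LG_ALIGN_MASK;
}

int
main(void) {
	int flags = encode_flags(3, 4); /* arena 3, 16-byte alignment */
	assert(arena_get(flags) == 3);
	assert((1u << lg_align_get(flags)) == 16);
	printf("flags=0x%x arena=%u align=%u\n", flags, arena_get(flags),
	    1u << lg_align_get(flags));
	return 0;
}
```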
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in deleted file mode 100644 index 5ce77d9..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in +++ /dev/null | |||
| @@ -1,263 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_PREAMBLE_H | ||
| 2 | #define JEMALLOC_PREAMBLE_H | ||
| 3 | |||
| 4 | #include "jemalloc_internal_defs.h" | ||
| 5 | #include "jemalloc/internal/jemalloc_internal_decls.h" | ||
| 6 | |||
| 7 | #if defined(JEMALLOC_UTRACE) || defined(JEMALLOC_UTRACE_LABEL) | ||
| 8 | #include <sys/ktrace.h> | ||
| 9 | # if defined(JEMALLOC_UTRACE) | ||
| 10 | # define UTRACE_CALL(p, l) utrace(p, l) | ||
| 11 | # else | ||
| 12 | # define UTRACE_CALL(p, l) utrace("jemalloc_process", p, l) | ||
| 13 | # define JEMALLOC_UTRACE | ||
| 14 | # endif | ||
| 15 | #endif | ||
| 16 | |||
| 17 | #define JEMALLOC_NO_DEMANGLE | ||
| 18 | #ifdef JEMALLOC_JET | ||
| 19 | # undef JEMALLOC_IS_MALLOC | ||
| 20 | # define JEMALLOC_N(n) jet_##n | ||
| 21 | # include "jemalloc/internal/public_namespace.h" | ||
| 22 | # define JEMALLOC_NO_RENAME | ||
| 23 | # include "../jemalloc@install_suffix@.h" | ||
| 24 | # undef JEMALLOC_NO_RENAME | ||
| 25 | #else | ||
| 26 | # define JEMALLOC_N(n) @private_namespace@##n | ||
| 27 | # include "../jemalloc@install_suffix@.h" | ||
| 28 | #endif | ||
| 29 | |||
| 30 | #if defined(JEMALLOC_OSATOMIC) | ||
| 31 | #include <libkern/OSAtomic.h> | ||
| 32 | #endif | ||
| 33 | |||
| 34 | #ifdef JEMALLOC_ZONE | ||
| 35 | #include <mach/mach_error.h> | ||
| 36 | #include <mach/mach_init.h> | ||
| 37 | #include <mach/vm_map.h> | ||
| 38 | #endif | ||
| 39 | |||
| 40 | #include "jemalloc/internal/jemalloc_internal_macros.h" | ||
| 41 | |||
| 42 | /* | ||
| 43 | * Note that the ordering matters here; the hook itself is name-mangled. We | ||
| 44 | * want the inclusion of hooks to happen early, so that we hook as much as | ||
| 45 | * possible. | ||
| 46 | */ | ||
| 47 | #ifndef JEMALLOC_NO_PRIVATE_NAMESPACE | ||
| 48 | # ifndef JEMALLOC_JET | ||
| 49 | # include "jemalloc/internal/private_namespace.h" | ||
| 50 | # else | ||
| 51 | # include "jemalloc/internal/private_namespace_jet.h" | ||
| 52 | # endif | ||
| 53 | #endif | ||
| 54 | #include "jemalloc/internal/test_hooks.h" | ||
| 55 | |||
| 56 | #ifdef JEMALLOC_DEFINE_MADVISE_FREE | ||
| 57 | # define JEMALLOC_MADV_FREE 8 | ||
| 58 | #endif | ||
| 59 | |||
| 60 | static const bool config_debug = | ||
| 61 | #ifdef JEMALLOC_DEBUG | ||
| 62 | true | ||
| 63 | #else | ||
| 64 | false | ||
| 65 | #endif | ||
| 66 | ; | ||
| 67 | static const bool have_dss = | ||
| 68 | #ifdef JEMALLOC_DSS | ||
| 69 | true | ||
| 70 | #else | ||
| 71 | false | ||
| 72 | #endif | ||
| 73 | ; | ||
| 74 | static const bool have_madvise_huge = | ||
| 75 | #ifdef JEMALLOC_HAVE_MADVISE_HUGE | ||
| 76 | true | ||
| 77 | #else | ||
| 78 | false | ||
| 79 | #endif | ||
| 80 | ; | ||
| 81 | static const bool config_fill = | ||
| 82 | #ifdef JEMALLOC_FILL | ||
| 83 | true | ||
| 84 | #else | ||
| 85 | false | ||
| 86 | #endif | ||
| 87 | ; | ||
| 88 | static const bool config_lazy_lock = | ||
| 89 | #ifdef JEMALLOC_LAZY_LOCK | ||
| 90 | true | ||
| 91 | #else | ||
| 92 | false | ||
| 93 | #endif | ||
| 94 | ; | ||
| 95 | static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF; | ||
| 96 | static const bool config_prof = | ||
| 97 | #ifdef JEMALLOC_PROF | ||
| 98 | true | ||
| 99 | #else | ||
| 100 | false | ||
| 101 | #endif | ||
| 102 | ; | ||
| 103 | static const bool config_prof_libgcc = | ||
| 104 | #ifdef JEMALLOC_PROF_LIBGCC | ||
| 105 | true | ||
| 106 | #else | ||
| 107 | false | ||
| 108 | #endif | ||
| 109 | ; | ||
| 110 | static const bool config_prof_libunwind = | ||
| 111 | #ifdef JEMALLOC_PROF_LIBUNWIND | ||
| 112 | true | ||
| 113 | #else | ||
| 114 | false | ||
| 115 | #endif | ||
| 116 | ; | ||
| 117 | static const bool maps_coalesce = | ||
| 118 | #ifdef JEMALLOC_MAPS_COALESCE | ||
| 119 | true | ||
| 120 | #else | ||
| 121 | false | ||
| 122 | #endif | ||
| 123 | ; | ||
| 124 | static const bool config_stats = | ||
| 125 | #ifdef JEMALLOC_STATS | ||
| 126 | true | ||
| 127 | #else | ||
| 128 | false | ||
| 129 | #endif | ||
| 130 | ; | ||
| 131 | static const bool config_tls = | ||
| 132 | #ifdef JEMALLOC_TLS | ||
| 133 | true | ||
| 134 | #else | ||
| 135 | false | ||
| 136 | #endif | ||
| 137 | ; | ||
| 138 | static const bool config_utrace = | ||
| 139 | #ifdef JEMALLOC_UTRACE | ||
| 140 | true | ||
| 141 | #else | ||
| 142 | false | ||
| 143 | #endif | ||
| 144 | ; | ||
| 145 | static const bool config_xmalloc = | ||
| 146 | #ifdef JEMALLOC_XMALLOC | ||
| 147 | true | ||
| 148 | #else | ||
| 149 | false | ||
| 150 | #endif | ||
| 151 | ; | ||
| 152 | static const bool config_cache_oblivious = | ||
| 153 | #ifdef JEMALLOC_CACHE_OBLIVIOUS | ||
| 154 | true | ||
| 155 | #else | ||
| 156 | false | ||
| 157 | #endif | ||
| 158 | ; | ||
| 159 | /* | ||
| 160 | * Undocumented, for jemalloc development use only at the moment. See the note | ||
| 161 | * in jemalloc/internal/log.h. | ||
| 162 | */ | ||
| 163 | static const bool config_log = | ||
| 164 | #ifdef JEMALLOC_LOG | ||
| 165 | true | ||
| 166 | #else | ||
| 167 | false | ||
| 168 | #endif | ||
| 169 | ; | ||
| 170 | /* | ||
| 171 | * Whether extra safety checks are enabled: things like checking the size of | ||
| 172 | * sized deallocations, double-frees, etc. | ||
| 173 | */ | ||
| 174 | static const bool config_opt_safety_checks = | ||
| 175 | #ifdef JEMALLOC_OPT_SAFETY_CHECKS | ||
| 176 | true | ||
| 177 | #elif defined(JEMALLOC_DEBUG) | ||
| 178 | /* | ||
| 179 | * This lets us only guard safety checks by one flag instead of two; fast | ||
| 180 | * checks can guard solely by config_opt_safety_checks and run in debug mode | ||
| 181 | * too. | ||
| 182 | */ | ||
| 183 | true | ||
| 184 | #else | ||
| 185 | false | ||
| 186 | #endif | ||
| 187 | ; | ||
| 188 | |||
| 189 | /* | ||
| 190 | * Extra debugging of sized deallocations too onerous to be included in the | ||
| 191 | * general safety checks. | ||
| 192 | */ | ||
| 193 | static const bool config_opt_size_checks = | ||
| 194 | #if defined(JEMALLOC_OPT_SIZE_CHECKS) || defined(JEMALLOC_DEBUG) | ||
| 195 | true | ||
| 196 | #else | ||
| 197 | false | ||
| 198 | #endif | ||
| 199 | ; | ||
| 200 | |||
| 201 | static const bool config_uaf_detection = | ||
| 202 | #if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG) | ||
| 203 | true | ||
| 204 | #else | ||
| 205 | false | ||
| 206 | #endif | ||
| 207 | ; | ||
| 208 | |||
| 209 | /* Whether or not the C++ extensions are enabled. */ | ||
| 210 | static const bool config_enable_cxx = | ||
| 211 | #ifdef JEMALLOC_ENABLE_CXX | ||
| 212 | true | ||
| 213 | #else | ||
| 214 | false | ||
| 215 | #endif | ||
| 216 | ; | ||
| 217 | |||
| 218 | #if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU) | ||
| 219 | /* Currently percpu_arena depends on sched_getcpu. */ | ||
| 220 | #define JEMALLOC_PERCPU_ARENA | ||
| 221 | #endif | ||
| 222 | static const bool have_percpu_arena = | ||
| 223 | #ifdef JEMALLOC_PERCPU_ARENA | ||
| 224 | true | ||
| 225 | #else | ||
| 226 | false | ||
| 227 | #endif | ||
| 228 | ; | ||
| 229 | /* | ||
| 230 | * Undocumented, and not recommended; the application should take full | ||
| 231 | * responsibility for tracking provenance. | ||
| 232 | */ | ||
| 233 | static const bool force_ivsalloc = | ||
| 234 | #ifdef JEMALLOC_FORCE_IVSALLOC | ||
| 235 | true | ||
| 236 | #else | ||
| 237 | false | ||
| 238 | #endif | ||
| 239 | ; | ||
| 240 | static const bool have_background_thread = | ||
| 241 | #ifdef JEMALLOC_BACKGROUND_THREAD | ||
| 242 | true | ||
| 243 | #else | ||
| 244 | false | ||
| 245 | #endif | ||
| 246 | ; | ||
| 247 | static const bool config_high_res_timer = | ||
| 248 | #ifdef JEMALLOC_HAVE_CLOCK_REALTIME | ||
| 249 | true | ||
| 250 | #else | ||
| 251 | false | ||
| 252 | #endif | ||
| 253 | ; | ||
| 254 | |||
| 255 | static const bool have_memcntl = | ||
| 256 | #ifdef JEMALLOC_HAVE_MEMCNTL | ||
| 257 | true | ||
| 258 | #else | ||
| 259 | false | ||
| 260 | #endif | ||
| 261 | ; | ||
| 262 | |||
| 263 | #endif /* JEMALLOC_PREAMBLE_H */ | ||
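The long run of `static const bool config_*` definitions above is the idiom this codebase uses to turn build-time #ifdefs into ordinary constants: call sites write `if (config_stats) { ... }`, the branch is always parsed and type-checked, and the compiler drops it when the constant is false. A tiny self-contained illustration of the same pattern with an invented feature flag:

```c
#include <stdbool.h>
#include <stdio.h>

/* Same idiom as the config_* constants above, with a made-up feature name. */
static const bool config_widget =
#ifdef EXAMPLE_ENABLE_WIDGET
    true
#else
    false
#endif
    ;

int
main(void) {
	/* Always compiled and type-checked; dead-code-eliminated when false. */
	if (config_widget) {
		printf("widget support enabled\n");
	} else {
		printf("widget support disabled\n");
	}
	return 0;
}
```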
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/large_externs.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/large_externs.h deleted file mode 100644 index 8e09122..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/large_externs.h +++ /dev/null | |||
| @@ -1,24 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H | ||
| 2 | #define JEMALLOC_INTERNAL_LARGE_EXTERNS_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/hook.h" | ||
| 5 | |||
| 6 | void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); | ||
| 7 | void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, | ||
| 8 | bool zero); | ||
| 9 | bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min, | ||
| 10 | size_t usize_max, bool zero); | ||
| 11 | void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize, | ||
| 12 | size_t alignment, bool zero, tcache_t *tcache, | ||
| 13 | hook_ralloc_args_t *hook_args); | ||
| 14 | |||
| 15 | void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata); | ||
| 16 | void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata); | ||
| 17 | void large_dalloc(tsdn_t *tsdn, edata_t *edata); | ||
| 18 | size_t large_salloc(tsdn_t *tsdn, const edata_t *edata); | ||
| 19 | void large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info, | ||
| 20 | bool reset_recent); | ||
| 21 | void large_prof_tctx_reset(edata_t *edata); | ||
| 22 | void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size); | ||
| 23 | |||
| 24 | #endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/lockedint.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/lockedint.h deleted file mode 100644 index d020ebe..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/lockedint.h +++ /dev/null | |||
| @@ -1,204 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_LOCKEDINT_H | ||
| 2 | #define JEMALLOC_INTERNAL_LOCKEDINT_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * In those architectures that support 64-bit atomics, we use atomic updates for | ||
| 6 | * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize | ||
| 7 | * externally. | ||
| 8 | */ | ||
| 9 | |||
| 10 | typedef struct locked_u64_s locked_u64_t; | ||
| 11 | #ifdef JEMALLOC_ATOMIC_U64 | ||
| 12 | struct locked_u64_s { | ||
| 13 | atomic_u64_t val; | ||
| 14 | }; | ||
| 15 | #else | ||
| 16 | /* Must hold the associated mutex. */ | ||
| 17 | struct locked_u64_s { | ||
| 18 | uint64_t val; | ||
| 19 | }; | ||
| 20 | #endif | ||
| 21 | |||
| 22 | typedef struct locked_zu_s locked_zu_t; | ||
| 23 | struct locked_zu_s { | ||
| 24 | atomic_zu_t val; | ||
| 25 | }; | ||
| 26 | |||
| 27 | #ifndef JEMALLOC_ATOMIC_U64 | ||
| 28 | # define LOCKEDINT_MTX_DECLARE(name) malloc_mutex_t name; | ||
| 29 | # define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) \ | ||
| 30 | malloc_mutex_init(&(mu), name, rank, rank_mode) | ||
| 31 | # define LOCKEDINT_MTX(mtx) (&(mtx)) | ||
| 32 | # define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu)) | ||
| 33 | # define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu)) | ||
| 34 | # define LOCKEDINT_MTX_PREFORK(tsdn, mu) malloc_mutex_prefork(tsdn, &(mu)) | ||
| 35 | # define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) \ | ||
| 36 | malloc_mutex_postfork_parent(tsdn, &(mu)) | ||
| 37 | # define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) \ | ||
| 38 | malloc_mutex_postfork_child(tsdn, &(mu)) | ||
| 39 | #else | ||
| 40 | # define LOCKEDINT_MTX_DECLARE(name) | ||
| 41 | # define LOCKEDINT_MTX(mtx) NULL | ||
| 42 | # define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) false | ||
| 43 | # define LOCKEDINT_MTX_LOCK(tsdn, mu) | ||
| 44 | # define LOCKEDINT_MTX_UNLOCK(tsdn, mu) | ||
| 45 | # define LOCKEDINT_MTX_PREFORK(tsdn, mu) | ||
| 46 | # define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) | ||
| 47 | # define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) | ||
| 48 | #endif | ||
| 49 | |||
| 50 | #ifdef JEMALLOC_ATOMIC_U64 | ||
| 51 | # define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) assert((mtx) == NULL) | ||
| 52 | #else | ||
| 53 | # define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) \ | ||
| 54 | malloc_mutex_assert_owner(tsdn, (mtx)) | ||
| 55 | #endif | ||
| 56 | |||
| 57 | static inline uint64_t | ||
| 58 | locked_read_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p) { | ||
| 59 | LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); | ||
| 60 | #ifdef JEMALLOC_ATOMIC_U64 | ||
| 61 | return atomic_load_u64(&p->val, ATOMIC_RELAXED); | ||
| 62 | #else | ||
| 63 | return p->val; | ||
| 64 | #endif | ||
| 65 | } | ||
| 66 | |||
| 67 | static inline void | ||
| 68 | locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p, | ||
| 69 | uint64_t x) { | ||
| 70 | LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); | ||
| 71 | #ifdef JEMALLOC_ATOMIC_U64 | ||
| 72 | atomic_fetch_add_u64(&p->val, x, ATOMIC_RELAXED); | ||
| 73 | #else | ||
| 74 | p->val += x; | ||
| 75 | #endif | ||
| 76 | } | ||
| 77 | |||
| 78 | static inline void | ||
| 79 | locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p, | ||
| 80 | uint64_t x) { | ||
| 81 | LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); | ||
| 82 | #ifdef JEMALLOC_ATOMIC_U64 | ||
| 83 | uint64_t r = atomic_fetch_sub_u64(&p->val, x, ATOMIC_RELAXED); | ||
| 84 | assert(r - x <= r); | ||
| 85 | #else | ||
| 86 | p->val -= x; | ||
| 87 | assert(p->val + x >= p->val); | ||
| 88 | #endif | ||
| 89 | } | ||
| 90 | |||
| 91 | /* Increment and take modulus. Returns whether the modulo made any change. */ | ||
| 92 | static inline bool | ||
| 93 | locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p, | ||
| 94 | const uint64_t x, const uint64_t modulus) { | ||
| 95 | LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); | ||
| 96 | uint64_t before, after; | ||
| 97 | bool overflow; | ||
| 98 | #ifdef JEMALLOC_ATOMIC_U64 | ||
| 99 | before = atomic_load_u64(&p->val, ATOMIC_RELAXED); | ||
| 100 | do { | ||
| 101 | after = before + x; | ||
| 102 | assert(after >= before); | ||
| 103 | overflow = (after >= modulus); | ||
| 104 | if (overflow) { | ||
| 105 | after %= modulus; | ||
| 106 | } | ||
| 107 | } while (!atomic_compare_exchange_weak_u64(&p->val, &before, after, | ||
| 108 | ATOMIC_RELAXED, ATOMIC_RELAXED)); | ||
| 109 | #else | ||
| 110 | before = p->val; | ||
| 111 | after = before + x; | ||
| 112 | overflow = (after >= modulus); | ||
| 113 | if (overflow) { | ||
| 114 | after %= modulus; | ||
| 115 | } | ||
| 116 | p->val = after; | ||
| 117 | #endif | ||
| 118 | return overflow; | ||
| 119 | } | ||
| 120 | |||
| 121 | /* | ||
| 122 | * Non-atomically sets *dst += src. *dst needs external synchronization. | ||
| 123 | * This lets us avoid the cost of a fetch_add when it's unnecessary (note that | ||
| 124 | * the types here are atomic). | ||
| 125 | */ | ||
| 126 | static inline void | ||
| 127 | locked_inc_u64_unsynchronized(locked_u64_t *dst, uint64_t src) { | ||
| 128 | #ifdef JEMALLOC_ATOMIC_U64 | ||
| 129 | uint64_t cur_dst = atomic_load_u64(&dst->val, ATOMIC_RELAXED); | ||
| 130 | atomic_store_u64(&dst->val, src + cur_dst, ATOMIC_RELAXED); | ||
| 131 | #else | ||
| 132 | dst->val += src; | ||
| 133 | #endif | ||
| 134 | } | ||
| 135 | |||
| 136 | static inline uint64_t | ||
| 137 | locked_read_u64_unsynchronized(locked_u64_t *p) { | ||
| 138 | #ifdef JEMALLOC_ATOMIC_U64 | ||
| 139 | return atomic_load_u64(&p->val, ATOMIC_RELAXED); | ||
| 140 | #else | ||
| 141 | return p->val; | ||
| 142 | #endif | ||
| 143 | } | ||
| 144 | |||
| 145 | static inline void | ||
| 146 | locked_init_u64_unsynchronized(locked_u64_t *p, uint64_t x) { | ||
| 147 | #ifdef JEMALLOC_ATOMIC_U64 | ||
| 148 | atomic_store_u64(&p->val, x, ATOMIC_RELAXED); | ||
| 149 | #else | ||
| 150 | p->val = x; | ||
| 151 | #endif | ||
| 152 | } | ||
| 153 | |||
| 154 | static inline size_t | ||
| 155 | locked_read_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p) { | ||
| 156 | LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); | ||
| 157 | #ifdef JEMALLOC_ATOMIC_U64 | ||
| 158 | return atomic_load_zu(&p->val, ATOMIC_RELAXED); | ||
| 159 | #else | ||
| 160 | return atomic_load_zu(&p->val, ATOMIC_RELAXED); | ||
| 161 | #endif | ||
| 162 | } | ||
| 163 | |||
| 164 | static inline void | ||
| 165 | locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p, | ||
| 166 | size_t x) { | ||
| 167 | LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); | ||
| 168 | #ifdef JEMALLOC_ATOMIC_U64 | ||
| 169 | atomic_fetch_add_zu(&p->val, x, ATOMIC_RELAXED); | ||
| 170 | #else | ||
| 171 | size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED); | ||
| 172 | atomic_store_zu(&p->val, cur + x, ATOMIC_RELAXED); | ||
| 173 | #endif | ||
| 174 | } | ||
| 175 | |||
| 176 | static inline void | ||
| 177 | locked_dec_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p, | ||
| 178 | size_t x) { | ||
| 179 | LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); | ||
| 180 | #ifdef JEMALLOC_ATOMIC_U64 | ||
| 181 | size_t r = atomic_fetch_sub_zu(&p->val, x, ATOMIC_RELAXED); | ||
| 182 | assert(r - x <= r); | ||
| 183 | #else | ||
| 184 | size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED); | ||
| 185 | atomic_store_zu(&p->val, cur - x, ATOMIC_RELAXED); | ||
| 186 | #endif | ||
| 187 | } | ||
| 188 | |||
| 189 | /* Like the _u64 variant, needs an externally synchronized *dst. */ | ||
| 190 | static inline void | ||
| 191 | locked_inc_zu_unsynchronized(locked_zu_t *dst, size_t src) { | ||
| 192 | size_t cur_dst = atomic_load_zu(&dst->val, ATOMIC_RELAXED); | ||
| 193 | atomic_store_zu(&dst->val, src + cur_dst, ATOMIC_RELAXED); | ||
| 194 | } | ||
| 195 | |||
| 196 | /* | ||
| 197 | * Unlike the _u64 variant, this is safe to call unconditionally. | ||
| 198 | */ | ||
| 199 | static inline size_t | ||
| 200 | locked_read_atomic_zu(locked_zu_t *p) { | ||
| 201 | return atomic_load_zu(&p->val, ATOMIC_RELAXED); | ||
| 202 | } | ||
| 203 | |||
| 204 | #endif /* JEMALLOC_INTERNAL_LOCKEDINT_H */ | ||
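locked_inc_mod_u64() above is the one helper in this header whose atomic path is non-trivial: it adds, reduces modulo `modulus` on wrap, and retries with a weak compare-exchange until the reduced value is published. A standalone sketch of that loop using C11 stdatomic (illustrative only; jemalloc uses its own atomic wrappers rather than these calls):

```c
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Add x to *val modulo `modulus`; return whether the modulo reduced the
 * result. Same shape as locked_inc_mod_u64()'s JEMALLOC_ATOMIC_U64 branch. */
static bool
inc_mod(_Atomic uint64_t *val, uint64_t x, uint64_t modulus) {
	uint64_t before = atomic_load_explicit(val, memory_order_relaxed);
	uint64_t after;
	bool overflow;
	do {
		after = before + x;
		overflow = (after >= modulus);
		if (overflow) {
			after %= modulus;
		}
		/* On failure, `before` is reloaded with the current value. */
	} while (!atomic_compare_exchange_weak_explicit(val, &before, after,
	    memory_order_relaxed, memory_order_relaxed));
	return overflow;
}

int
main(void) {
	_Atomic uint64_t counter = 90;
	assert(!inc_mod(&counter, 5, 100));  /* 95, no wrap */
	assert(inc_mod(&counter, 10, 100));  /* 105 -> 5, wrapped */
	assert(atomic_load(&counter) == 5);
	return 0;
}
```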
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/log.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/log.h deleted file mode 100644 index 6420858..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/log.h +++ /dev/null | |||
| @@ -1,115 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_LOG_H | ||
| 2 | #define JEMALLOC_INTERNAL_LOG_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/atomic.h" | ||
| 5 | #include "jemalloc/internal/malloc_io.h" | ||
| 6 | #include "jemalloc/internal/mutex.h" | ||
| 7 | |||
| 8 | #ifdef JEMALLOC_LOG | ||
| 9 | # define JEMALLOC_LOG_VAR_BUFSIZE 1000 | ||
| 10 | #else | ||
| 11 | # define JEMALLOC_LOG_VAR_BUFSIZE 1 | ||
| 12 | #endif | ||
| 13 | |||
| 14 | #define JEMALLOC_LOG_BUFSIZE 4096 | ||
| 15 | |||
| 16 | /* | ||
| 17 | * The log malloc_conf option is a '|'-delimited list of log_var name segments | ||
| 18 | * which should be logged. The names are themselves hierarchical, with '.' as | ||
| 19 | * the delimiter (a "segment" is just a prefix in the log namespace). So, if | ||
| 20 | * you have: | ||
| 21 | * | ||
| 22 | * log("arena", "log msg for arena"); // 1 | ||
| 23 | * log("arena.a", "log msg for arena.a"); // 2 | ||
| 24 | * log("arena.b", "log msg for arena.b"); // 3 | ||
| 25 | * log("arena.a.a", "log msg for arena.a.a"); // 4 | ||
| 26 | * log("extent.a", "log msg for extent.a"); // 5 | ||
| 27 | * log("extent.b", "log msg for extent.b"); // 6 | ||
| 28 | * | ||
| 29 | * And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and | ||
| 30 | * 6 will print at runtime. You can enable logging from all log vars by | ||
| 31 | * writing "log=.". | ||
| 32 | * | ||
| 33 | * None of this should be regarded as a stable API right now. It's intended | ||
| 34 | * as a debugging interface, to let us keep around some of our printf-debugging | ||
| 35 | * statements. | ||
| 36 | */ | ||
| 37 | |||
| 38 | extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE]; | ||
| 39 | extern atomic_b_t log_init_done; | ||
| 40 | |||
| 41 | typedef struct log_var_s log_var_t; | ||
| 42 | struct log_var_s { | ||
| 43 | /* | ||
| 44 | * Lowest bit is "inited", second lowest is "enabled". Putting them in | ||
| 45 | * a single word lets us avoid any fences on weak architectures. | ||
| 46 | */ | ||
| 47 | atomic_u_t state; | ||
| 48 | const char *name; | ||
| 49 | }; | ||
| 50 | |||
| 51 | #define LOG_NOT_INITIALIZED 0U | ||
| 52 | #define LOG_INITIALIZED_NOT_ENABLED 1U | ||
| 53 | #define LOG_ENABLED 2U | ||
| 54 | |||
| 55 | #define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str} | ||
| 56 | |||
| 57 | /* | ||
| 58 | * Returns the value we should assume for state (which is not necessarily | ||
| 59 | * accurate; if logging is done before logging has finished initializing, then | ||
| 60 | * we default to doing the safe thing by logging everything). | ||
| 61 | */ | ||
| 62 | unsigned log_var_update_state(log_var_t *log_var); | ||
| 63 | |||
| 64 | /* We factor out the metadata management to allow us to test more easily. */ | ||
| 65 | #define log_do_begin(log_var) \ | ||
| 66 | if (config_log) { \ | ||
| 67 | unsigned log_state = atomic_load_u(&(log_var).state, \ | ||
| 68 | ATOMIC_RELAXED); \ | ||
| 69 | if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \ | ||
| 70 | log_state = log_var_update_state(&(log_var)); \ | ||
| 71 | assert(log_state != LOG_NOT_INITIALIZED); \ | ||
| 72 | } \ | ||
| 73 | if (log_state == LOG_ENABLED) { \ | ||
| 74 | { | ||
| 75 | /* User code executes here. */ | ||
| 76 | #define log_do_end(log_var) \ | ||
| 77 | } \ | ||
| 78 | } \ | ||
| 79 | } | ||
| 80 | |||
| 81 | /* | ||
| 82 | * MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during | ||
| 83 | * preprocessing. To work around this, we take all potential extra arguments in | ||
| 84 | * a var-args function. Since a varargs macro needs at least one argument in | ||
| 85 | * the "...", we accept the format string there, and require that the first | ||
| 86 | * argument in this "..." is a const char *. | ||
| 87 | */ | ||
| 88 | static inline void | ||
| 89 | log_impl_varargs(const char *name, ...) { | ||
| 90 | char buf[JEMALLOC_LOG_BUFSIZE]; | ||
| 91 | va_list ap; | ||
| 92 | |||
| 93 | va_start(ap, name); | ||
| 94 | const char *format = va_arg(ap, const char *); | ||
| 95 | size_t dst_offset = 0; | ||
| 96 | dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name); | ||
| 97 | dst_offset += malloc_vsnprintf(buf + dst_offset, | ||
| 98 | JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap); | ||
| 99 | dst_offset += malloc_snprintf(buf + dst_offset, | ||
| 100 | JEMALLOC_LOG_BUFSIZE - dst_offset, "\n"); | ||
| 101 | va_end(ap); | ||
| 102 | |||
| 103 | malloc_write(buf); | ||
| 104 | } | ||
| 105 | |||
| 106 | /* Call as log("log.var.str", "format_string %d", arg_for_format_string); */ | ||
| 107 | #define LOG(log_var_str, ...) \ | ||
| 108 | do { \ | ||
| 109 | static log_var_t log_var = LOG_VAR_INIT(log_var_str); \ | ||
| 110 | log_do_begin(log_var) \ | ||
| 111 | log_impl_varargs((log_var).name, __VA_ARGS__); \ | ||
| 112 | log_do_end(log_var) \ | ||
| 113 | } while (0) | ||
| 114 | |||
| 115 | #endif /* JEMALLOC_INTERNAL_LOG_H */ | ||
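The comment block and the LOG() macro above define the whole intended interface. A minimal usage sketch (the log var name, function, and message are illustrative only; whether the line prints depends on the "log=..." malloc_conf setting described above):

```c
/*
 * Hypothetical caller inside jemalloc. With MALLOC_CONF="log=arena.a|extent"
 * this line would print; with "log=extent" it would not.
 */
static void
arena_new_hypothetical(unsigned ind) {
	LOG("arena.a.new", "created arena with index %u", ind);
}
```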
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/malloc_io.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/malloc_io.h deleted file mode 100644 index a375bda..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/malloc_io.h +++ /dev/null | |||
| @@ -1,105 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_MALLOC_IO_H | ||
| 2 | #define JEMALLOC_INTERNAL_MALLOC_IO_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/jemalloc_internal_types.h" | ||
| 5 | |||
| 6 | #ifdef _WIN32 | ||
| 7 | # ifdef _WIN64 | ||
| 8 | # define FMT64_PREFIX "ll" | ||
| 9 | # define FMTPTR_PREFIX "ll" | ||
| 10 | # else | ||
| 11 | # define FMT64_PREFIX "ll" | ||
| 12 | # define FMTPTR_PREFIX "" | ||
| 13 | # endif | ||
| 14 | # define FMTd32 "d" | ||
| 15 | # define FMTu32 "u" | ||
| 16 | # define FMTx32 "x" | ||
| 17 | # define FMTd64 FMT64_PREFIX "d" | ||
| 18 | # define FMTu64 FMT64_PREFIX "u" | ||
| 19 | # define FMTx64 FMT64_PREFIX "x" | ||
| 20 | # define FMTdPTR FMTPTR_PREFIX "d" | ||
| 21 | # define FMTuPTR FMTPTR_PREFIX "u" | ||
| 22 | # define FMTxPTR FMTPTR_PREFIX "x" | ||
| 23 | #else | ||
| 24 | # include <inttypes.h> | ||
| 25 | # define FMTd32 PRId32 | ||
| 26 | # define FMTu32 PRIu32 | ||
| 27 | # define FMTx32 PRIx32 | ||
| 28 | # define FMTd64 PRId64 | ||
| 29 | # define FMTu64 PRIu64 | ||
| 30 | # define FMTx64 PRIx64 | ||
| 31 | # define FMTdPTR PRIdPTR | ||
| 32 | # define FMTuPTR PRIuPTR | ||
| 33 | # define FMTxPTR PRIxPTR | ||
| 34 | #endif | ||
| 35 | |||
| 36 | /* Size of stack-allocated buffer passed to buferror(). */ | ||
| 37 | #define BUFERROR_BUF 64 | ||
| 38 | |||
| 39 | /* | ||
| 40 | * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be | ||
| 41 | * large enough for all possible uses within jemalloc. | ||
| 42 | */ | ||
| 43 | #define MALLOC_PRINTF_BUFSIZE 4096 | ||
| 44 | |||
| 45 | write_cb_t wrtmessage; | ||
| 46 | int buferror(int err, char *buf, size_t buflen); | ||
| 47 | uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, | ||
| 48 | int base); | ||
| 49 | void malloc_write(const char *s); | ||
| 50 | |||
| 51 | /* | ||
| 52 | * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating | ||
| 53 | * point math. | ||
| 54 | */ | ||
| 55 | size_t malloc_vsnprintf(char *str, size_t size, const char *format, | ||
| 56 | va_list ap); | ||
| 57 | size_t malloc_snprintf(char *str, size_t size, const char *format, ...) | ||
| 58 | JEMALLOC_FORMAT_PRINTF(3, 4); | ||
| 59 | /* | ||
| 60 | * The caller can set write_cb to null to choose to print with the | ||
| 61 | * je_malloc_message hook. | ||
| 62 | */ | ||
| 63 | void malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format, | ||
| 64 | va_list ap); | ||
| 65 | void malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format, | ||
| 66 | ...) JEMALLOC_FORMAT_PRINTF(3, 4); | ||
| 67 | void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); | ||
| 68 | |||
| 69 | static inline ssize_t | ||
| 70 | malloc_write_fd(int fd, const void *buf, size_t count) { | ||
| 71 | #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write) | ||
| 72 | /* | ||
| 73 | * Use syscall(2) rather than write(2) when possible in order to avoid | ||
| 74 | * the possibility of memory allocation within libc. This is necessary | ||
| 75 | * on FreeBSD; most operating systems do not have this problem though. | ||
| 76 | * | ||
| 77 | * syscall() returns long or int, depending on platform, so capture the | ||
| 78 | * result in the widest plausible type to avoid compiler warnings. | ||
| 79 | */ | ||
| 80 | long result = syscall(SYS_write, fd, buf, count); | ||
| 81 | #else | ||
| 82 | ssize_t result = (ssize_t)write(fd, buf, | ||
| 83 | #ifdef _WIN32 | ||
| 84 | (unsigned int) | ||
| 85 | #endif | ||
| 86 | count); | ||
| 87 | #endif | ||
| 88 | return (ssize_t)result; | ||
| 89 | } | ||
| 90 | |||
| 91 | static inline ssize_t | ||
| 92 | malloc_read_fd(int fd, void *buf, size_t count) { | ||
| 93 | #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read) | ||
| 94 | long result = syscall(SYS_read, fd, buf, count); | ||
| 95 | #else | ||
| 96 | ssize_t result = read(fd, buf, | ||
| 97 | #ifdef _WIN32 | ||
| 98 | (unsigned int) | ||
| 99 | #endif | ||
| 100 | count); | ||
| 101 | #endif | ||
| 102 | return (ssize_t)result; | ||
| 103 | } | ||
| 104 | |||
| 105 | #endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */ | ||
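A short sketch of how the FMT macros combine with the allocation-free printf family declared above; the helper function and counter are illustrative:

```c
/* Sketch: portably format a 64-bit counter without touching libc printf. */
static void
report_counter_hypothetical(uint64_t nmalloc) {
	char buf[MALLOC_PRINTF_BUFSIZE];
	/* FMTu64 expands to the right length modifier on every platform. */
	malloc_snprintf(buf, sizeof(buf), "nmalloc: %"FMTu64"\n", nmalloc);
	malloc_write(buf);
}
```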
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/mpsc_queue.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/mpsc_queue.h deleted file mode 100644 index 316ea9b..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/mpsc_queue.h +++ /dev/null | |||
| @@ -1,134 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_MPSC_QUEUE_H | ||
| 2 | #define JEMALLOC_INTERNAL_MPSC_QUEUE_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/atomic.h" | ||
| 5 | |||
| 6 | /* | ||
| 7 | * A concurrent implementation of a multi-producer, single-consumer queue. It | ||
| 8 | * supports three concurrent operations: | ||
| 9 | * - Push | ||
| 10 | * - Push batch | ||
| 11 | * - Pop batch | ||
| 12 | * | ||
| 13 | * These operations are all lock-free. | ||
| 14 | * | ||
| 15 | * The implementation is the simple two-stack queue built on a Treiber stack. | ||
| 16 | * It's not terribly efficient, but this isn't expected to be used anywhere with | ||
| 17 | * hot code. In fact, we don't really even need queue semantics in any | ||
| 18 | * anticipated use cases; we could get away with just the stack. But this way | ||
| 19 | * lets us frame the API in terms of the existing list types, which is a nice | ||
| 20 | * convenience. We can save on cache misses by introducing our own (parallel) | ||
| 21 | * single-linked list type here, and dropping FIFO semantics, if we need this to | ||
| 22 | * get faster. Since we're currently providing queue semantics though, we use | ||
| 23 | * the prev field in the link rather than the next field for Treiber-stack | ||
| 24 | * linkage, so that we can preserve order for batch-pushed lists (recall that the | ||
| 25 | * two-stack trick reverses order in the lock-free first stack). | ||
| 26 | */ | ||
| 27 | |||
| 28 | #define mpsc_queue(a_type) \ | ||
| 29 | struct { \ | ||
| 30 | atomic_p_t tail; \ | ||
| 31 | } | ||
| 32 | |||
| 33 | #define mpsc_queue_proto(a_attr, a_prefix, a_queue_type, a_type, \ | ||
| 34 | a_list_type) \ | ||
| 35 | /* Initialize a queue. */ \ | ||
| 36 | a_attr void \ | ||
| 37 | a_prefix##new(a_queue_type *queue); \ | ||
| 38 | /* Insert all items in src into the queue, clearing src. */ \ | ||
| 39 | a_attr void \ | ||
| 40 | a_prefix##push_batch(a_queue_type *queue, a_list_type *src); \ | ||
| 41 | /* Insert node into the queue. */ \ | ||
| 42 | a_attr void \ | ||
| 43 | a_prefix##push(a_queue_type *queue, a_type *node); \ | ||
| 44 | /* \ | ||
| 45 | * Pop all items in the queue into the list at dst. dst should already \ | ||
| 46 | * be initialized (and may contain existing items, which then remain \ | ||
| 47 | * in dst). \ | ||
| 48 | */ \ | ||
| 49 | a_attr void \ | ||
| 50 | a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst); | ||
| 51 | |||
| 52 | #define mpsc_queue_gen(a_attr, a_prefix, a_queue_type, a_type, \ | ||
| 53 | a_list_type, a_link) \ | ||
| 54 | a_attr void \ | ||
| 55 | a_prefix##new(a_queue_type *queue) { \ | ||
| 56 | atomic_store_p(&queue->tail, NULL, ATOMIC_RELAXED); \ | ||
| 57 | } \ | ||
| 58 | a_attr void \ | ||
| 59 | a_prefix##push_batch(a_queue_type *queue, a_list_type *src) { \ | ||
| 60 | /* \ | ||
| 61 | * Reuse the ql list next field as the Treiber stack next \ | ||
| 62 | * field. \ | ||
| 63 | */ \ | ||
| 64 | a_type *first = ql_first(src); \ | ||
| 65 | a_type *last = ql_last(src, a_link); \ | ||
| 66 | void* cur_tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \ | ||
| 67 | do { \ | ||
| 68 | /* \ | ||
| 69 | * Note that this breaks the queue ring structure; \ | ||
| 70 | * it's not a ring any more! \ | ||
| 71 | */ \ | ||
| 72 | first->a_link.qre_prev = cur_tail; \ | ||
| 73 | /* \ | ||
| 74 | * Note: the upcoming CAS doesn't need an acquire; every \ | ||
| 75 | * push only needs to synchronize with the next pop, \ | ||
| 76 | * which we get from the release sequence rules. \ | ||
| 77 | */ \ | ||
| 78 | } while (!atomic_compare_exchange_weak_p(&queue->tail, \ | ||
| 79 | &cur_tail, last, ATOMIC_RELEASE, ATOMIC_RELAXED)); \ | ||
| 80 | ql_new(src); \ | ||
| 81 | } \ | ||
| 82 | a_attr void \ | ||
| 83 | a_prefix##push(a_queue_type *queue, a_type *node) { \ | ||
| 84 | ql_elm_new(node, a_link); \ | ||
| 85 | a_list_type list; \ | ||
| 86 | ql_new(&list); \ | ||
| 87 | ql_head_insert(&list, node, a_link); \ | ||
| 88 | a_prefix##push_batch(queue, &list); \ | ||
| 89 | } \ | ||
| 90 | a_attr void \ | ||
| 91 | a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst) { \ | ||
| 92 | a_type *tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \ | ||
| 93 | if (tail == NULL) { \ | ||
| 94 | /* \ | ||
| 95 | * In the common special case where there are no \ | ||
| 96 | * pending elements, bail early without a costly RMW. \ | ||
| 97 | */ \ | ||
| 98 | return; \ | ||
| 99 | } \ | ||
| 100 | tail = atomic_exchange_p(&queue->tail, NULL, ATOMIC_ACQUIRE); \ | ||
| 101 | /* \ | ||
| 102 | * It's a single-consumer queue, so if tail started non-NULL, \ | ||
| 103 | * it'd better stay non-NULL. \ | ||
| 104 | */ \ | ||
| 105 | assert(tail != NULL); \ | ||
| 106 | /* \ | ||
| 107 | * We iterate through the stack and both fix up the link \ | ||
| 108 | * structure (stack insertion broke the list requirement that \ | ||
| 109 | * the list be circularly linked). It's just as efficient at \ | ||
| 110 | * this point to make the queue a "real" queue, so do that as \ | ||
| 111 | * well. \ | ||
| 112 | * If this ever gets to be a hot spot, we can omit this fixup \ | ||
| 113 | * and make the queue a bag (i.e. not necessarily ordered), but \ | ||
| 114 | * that would mean jettisoning the existing list API as the \ | ||
| 115 | * batch pushing/popping interface. \ | ||
| 116 | */ \ | ||
| 117 | a_list_type reversed; \ | ||
| 118 | ql_new(&reversed); \ | ||
| 119 | while (tail != NULL) { \ | ||
| 120 | /* \ | ||
| 121 | * Pop an item off the stack, prepend it onto the list \ | ||
| 122 | * (reversing the order). Recall that we use the \ | ||
| 123 | * list prev field as the Treiber stack next field to \ | ||
| 124 | * preserve order of batch-pushed items when reversed. \ | ||
| 125 | */ \ | ||
| 126 | a_type *next = tail->a_link.qre_prev; \ | ||
| 127 | ql_elm_new(tail, a_link); \ | ||
| 128 | ql_head_insert(&reversed, tail, a_link); \ | ||
| 129 | tail = next; \ | ||
| 130 | } \ | ||
| 131 | ql_concat(dst, &reversed, a_link); \ | ||
| 132 | } | ||
| 133 | |||
| 134 | #endif /* JEMALLOC_INTERNAL_MPSC_QUEUE_H */ | ||
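The two macros above are meant to be instantiated per node type. A hypothetical instantiation sketch, assuming jemalloc's usual ql_head/ql_elm list machinery and an invented job_t type:

```c
/* Invented node type; the "link" field doubles as the Treiber-stack link. */
typedef struct job_s job_t;
struct job_s {
	ql_elm(job_t)	link;
	int		payload;
};
typedef ql_head(job_t) job_list_t;
typedef mpsc_queue(job_t) job_queue_t;

/*
 * Generates static inline job_queue_new, job_queue_push,
 * job_queue_push_batch, and job_queue_pop_batch.
 */
mpsc_queue_proto(static inline, job_queue_, job_queue_t, job_t, job_list_t)
mpsc_queue_gen(static inline, job_queue_, job_queue_t, job_t, job_list_t, link)
```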
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/mutex.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/mutex.h deleted file mode 100644 index 63a0b1b..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/mutex.h +++ /dev/null | |||
| @@ -1,319 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_MUTEX_H | ||
| 2 | #define JEMALLOC_INTERNAL_MUTEX_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/atomic.h" | ||
| 5 | #include "jemalloc/internal/mutex_prof.h" | ||
| 6 | #include "jemalloc/internal/tsd.h" | ||
| 7 | #include "jemalloc/internal/witness.h" | ||
| 8 | |||
| 9 | extern int64_t opt_mutex_max_spin; | ||
| 10 | |||
| 11 | typedef enum { | ||
| 12 | /* Can only acquire one mutex of a given witness rank at a time. */ | ||
| 13 | malloc_mutex_rank_exclusive, | ||
| 14 | /* | ||
| 15 | * Can acquire multiple mutexes of the same witness rank, but in | ||
| 16 | * address-ascending order only. | ||
| 17 | */ | ||
| 18 | malloc_mutex_address_ordered | ||
| 19 | } malloc_mutex_lock_order_t; | ||
| 20 | |||
| 21 | typedef struct malloc_mutex_s malloc_mutex_t; | ||
| 22 | struct malloc_mutex_s { | ||
| 23 | union { | ||
| 24 | struct { | ||
| 25 | /* | ||
| 26 | * prof_data is defined first to reduce cacheline | ||
| 27 | * bouncing: the data is not touched by the mutex holder | ||
| 28 | * during unlocking, while it might be modified by | ||
| 29 | * contenders. Having it before the mutex itself could | ||
| 30 | * avoid prefetching a modified cacheline (for the | ||
| 31 | * unlocking thread). | ||
| 32 | */ | ||
| 33 | mutex_prof_data_t prof_data; | ||
| 34 | #ifdef _WIN32 | ||
| 35 | # if _WIN32_WINNT >= 0x0600 | ||
| 36 | SRWLOCK lock; | ||
| 37 | # else | ||
| 38 | CRITICAL_SECTION lock; | ||
| 39 | # endif | ||
| 40 | #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) | ||
| 41 | os_unfair_lock lock; | ||
| 42 | #elif (defined(JEMALLOC_MUTEX_INIT_CB)) | ||
| 43 | pthread_mutex_t lock; | ||
| 44 | malloc_mutex_t *postponed_next; | ||
| 45 | #else | ||
| 46 | pthread_mutex_t lock; | ||
| 47 | #endif | ||
| 48 | /* | ||
| 49 | * Hint flag to avoid exclusive cache line contention | ||
| 50 | * during spin waiting | ||
| 51 | */ | ||
| 52 | atomic_b_t locked; | ||
| 53 | }; | ||
| 54 | /* | ||
| 55 | * We only touch witness when configured w/ debug. However we | ||
| 56 | * keep the field in a union when !debug so that we don't have | ||
| 57 | * to pollute the code base with #ifdefs, while avoiding the | ||
| 58 | * memory cost. | ||
| 59 | */ | ||
| 60 | #if !defined(JEMALLOC_DEBUG) | ||
| 61 | witness_t witness; | ||
| 62 | malloc_mutex_lock_order_t lock_order; | ||
| 63 | #endif | ||
| 64 | }; | ||
| 65 | |||
| 66 | #if defined(JEMALLOC_DEBUG) | ||
| 67 | witness_t witness; | ||
| 68 | malloc_mutex_lock_order_t lock_order; | ||
| 69 | #endif | ||
| 70 | }; | ||
| 71 | |||
| 72 | #ifdef _WIN32 | ||
| 73 | # if _WIN32_WINNT >= 0x0600 | ||
| 74 | # define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock) | ||
| 75 | # define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock) | ||
| 76 | # define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock)) | ||
| 77 | # else | ||
| 78 | # define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock) | ||
| 79 | # define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock) | ||
| 80 | # define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock)) | ||
| 81 | # endif | ||
| 82 | #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) | ||
| 83 | # define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock) | ||
| 84 | # define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock) | ||
| 85 | # define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock)) | ||
| 86 | #else | ||
| 87 | # define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock) | ||
| 88 | # define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock) | ||
| 89 | # define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0) | ||
| 90 | #endif | ||
| 91 | |||
| 92 | #define LOCK_PROF_DATA_INITIALIZER \ | ||
| 93 | {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \ | ||
| 94 | ATOMIC_INIT(0), 0, NULL, 0} | ||
| 95 | |||
| 96 | #ifdef _WIN32 | ||
| 97 | # define MALLOC_MUTEX_INITIALIZER | ||
| 98 | #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) | ||
| 99 | # if defined(JEMALLOC_DEBUG) | ||
| 100 | # define MALLOC_MUTEX_INITIALIZER \ | ||
| 101 | {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \ | ||
| 102 | WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0} | ||
| 103 | # else | ||
| 104 | # define MALLOC_MUTEX_INITIALIZER \ | ||
| 105 | {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \ | ||
| 106 | WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} | ||
| 107 | # endif | ||
| 108 | #elif (defined(JEMALLOC_MUTEX_INIT_CB)) | ||
| 109 | # if (defined(JEMALLOC_DEBUG)) | ||
| 110 | # define MALLOC_MUTEX_INITIALIZER \ | ||
| 111 | {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \ | ||
| 112 | WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0} | ||
| 113 | # else | ||
| 114 | # define MALLOC_MUTEX_INITIALIZER \ | ||
| 115 | {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \ | ||
| 116 | WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} | ||
| 117 | # endif | ||
| 118 | |||
| 119 | #else | ||
| 120 | # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT | ||
| 121 | # if defined(JEMALLOC_DEBUG) | ||
| 122 | # define MALLOC_MUTEX_INITIALIZER \ | ||
| 123 | {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \ | ||
| 124 | WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0} | ||
| 125 | # else | ||
| 126 | # define MALLOC_MUTEX_INITIALIZER \ | ||
| 127 | {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \ | ||
| 128 | WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} | ||
| 129 | # endif | ||
| 130 | #endif | ||
| 131 | |||
| 132 | #ifdef JEMALLOC_LAZY_LOCK | ||
| 133 | extern bool isthreaded; | ||
| 134 | #else | ||
| 135 | # undef isthreaded /* Undo private_namespace.h definition. */ | ||
| 136 | # define isthreaded true | ||
| 137 | #endif | ||
| 138 | |||
| 139 | bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, | ||
| 140 | witness_rank_t rank, malloc_mutex_lock_order_t lock_order); | ||
| 141 | void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); | ||
| 142 | void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); | ||
| 143 | void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); | ||
| 144 | bool malloc_mutex_boot(void); | ||
| 145 | void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex); | ||
| 146 | |||
| 147 | void malloc_mutex_lock_slow(malloc_mutex_t *mutex); | ||
| 148 | |||
| 149 | static inline void | ||
| 150 | malloc_mutex_lock_final(malloc_mutex_t *mutex) { | ||
| 151 | MALLOC_MUTEX_LOCK(mutex); | ||
| 152 | atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED); | ||
| 153 | } | ||
| 154 | |||
| 155 | static inline bool | ||
| 156 | malloc_mutex_trylock_final(malloc_mutex_t *mutex) { | ||
| 157 | return MALLOC_MUTEX_TRYLOCK(mutex); | ||
| 158 | } | ||
| 159 | |||
| 160 | static inline void | ||
| 161 | mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) { | ||
| 162 | if (config_stats) { | ||
| 163 | mutex_prof_data_t *data = &mutex->prof_data; | ||
| 164 | data->n_lock_ops++; | ||
| 165 | if (data->prev_owner != tsdn) { | ||
| 166 | data->prev_owner = tsdn; | ||
| 167 | data->n_owner_switches++; | ||
| 168 | } | ||
| 169 | } | ||
| 170 | } | ||
| 171 | |||
| 172 | /* Trylock: return false if the lock is successfully acquired. */ | ||
| 173 | static inline bool | ||
| 174 | malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) { | ||
| 175 | witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); | ||
| 176 | if (isthreaded) { | ||
| 177 | if (malloc_mutex_trylock_final(mutex)) { | ||
| 178 | atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED); | ||
| 179 | return true; | ||
| 180 | } | ||
| 181 | mutex_owner_stats_update(tsdn, mutex); | ||
| 182 | } | ||
| 183 | witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); | ||
| 184 | |||
| 185 | return false; | ||
| 186 | } | ||
| 187 | |||
| 188 | /* Aggregate lock prof data. */ | ||
| 189 | static inline void | ||
| 190 | malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) { | ||
| 191 | nstime_add(&sum->tot_wait_time, &data->tot_wait_time); | ||
| 192 | if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) { | ||
| 193 | nstime_copy(&sum->max_wait_time, &data->max_wait_time); | ||
| 194 | } | ||
| 195 | |||
| 196 | sum->n_wait_times += data->n_wait_times; | ||
| 197 | sum->n_spin_acquired += data->n_spin_acquired; | ||
| 198 | |||
| 199 | if (sum->max_n_thds < data->max_n_thds) { | ||
| 200 | sum->max_n_thds = data->max_n_thds; | ||
| 201 | } | ||
| 202 | uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds, | ||
| 203 | ATOMIC_RELAXED); | ||
| 204 | uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32( | ||
| 205 | &data->n_waiting_thds, ATOMIC_RELAXED); | ||
| 206 | atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds, | ||
| 207 | ATOMIC_RELAXED); | ||
| 208 | sum->n_owner_switches += data->n_owner_switches; | ||
| 209 | sum->n_lock_ops += data->n_lock_ops; | ||
| 210 | } | ||
| 211 | |||
| 212 | static inline void | ||
| 213 | malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) { | ||
| 214 | witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); | ||
| 215 | if (isthreaded) { | ||
| 216 | if (malloc_mutex_trylock_final(mutex)) { | ||
| 217 | malloc_mutex_lock_slow(mutex); | ||
| 218 | atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED); | ||
| 219 | } | ||
| 220 | mutex_owner_stats_update(tsdn, mutex); | ||
| 221 | } | ||
| 222 | witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); | ||
| 223 | } | ||
| 224 | |||
| 225 | static inline void | ||
| 226 | malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) { | ||
| 227 | atomic_store_b(&mutex->locked, false, ATOMIC_RELAXED); | ||
| 228 | witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); | ||
| 229 | if (isthreaded) { | ||
| 230 | MALLOC_MUTEX_UNLOCK(mutex); | ||
| 231 | } | ||
| 232 | } | ||
| 233 | |||
| 234 | static inline void | ||
| 235 | malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { | ||
| 236 | witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); | ||
| 237 | } | ||
| 238 | |||
| 239 | static inline void | ||
| 240 | malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { | ||
| 241 | witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); | ||
| 242 | } | ||
| 243 | |||
| 244 | static inline void | ||
| 245 | malloc_mutex_prof_copy(mutex_prof_data_t *dst, mutex_prof_data_t *source) { | ||
| 246 | /* | ||
| 247 | * Not *really* allowed (we shouldn't be doing non-atomic loads of | ||
| 248 | * atomic data), but the mutex protection makes this safe, and writing | ||
| 249 | * a member-for-member copy is tedious for this situation. | ||
| 250 | */ | ||
| 251 | *dst = *source; | ||
| 252 | /* n_wait_thds is not reported (modified w/o locking). */ | ||
| 253 | atomic_store_u32(&dst->n_waiting_thds, 0, ATOMIC_RELAXED); | ||
| 254 | } | ||
| 255 | |||
| 256 | /* Copy the prof data from mutex for processing. */ | ||
| 257 | static inline void | ||
| 258 | malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data, | ||
| 259 | malloc_mutex_t *mutex) { | ||
| 260 | /* Can only read holding the mutex. */ | ||
| 261 | malloc_mutex_assert_owner(tsdn, mutex); | ||
| 262 | malloc_mutex_prof_copy(data, &mutex->prof_data); | ||
| 263 | } | ||
| 264 | |||
| 265 | static inline void | ||
| 266 | malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data, | ||
| 267 | malloc_mutex_t *mutex) { | ||
| 268 | mutex_prof_data_t *source = &mutex->prof_data; | ||
| 269 | /* Can only read holding the mutex. */ | ||
| 270 | malloc_mutex_assert_owner(tsdn, mutex); | ||
| 271 | |||
| 272 | nstime_add(&data->tot_wait_time, &source->tot_wait_time); | ||
| 273 | if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) { | ||
| 274 | nstime_copy(&data->max_wait_time, &source->max_wait_time); | ||
| 275 | } | ||
| 276 | data->n_wait_times += source->n_wait_times; | ||
| 277 | data->n_spin_acquired += source->n_spin_acquired; | ||
| 278 | if (data->max_n_thds < source->max_n_thds) { | ||
| 279 | data->max_n_thds = source->max_n_thds; | ||
| 280 | } | ||
| 281 | /* n_wait_thds is not reported. */ | ||
| 282 | atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED); | ||
| 283 | data->n_owner_switches += source->n_owner_switches; | ||
| 284 | data->n_lock_ops += source->n_lock_ops; | ||
| 285 | } | ||
| 286 | |||
| 287 | /* Compare the prof data and update to the maximum. */ | ||
| 288 | static inline void | ||
| 289 | malloc_mutex_prof_max_update(tsdn_t *tsdn, mutex_prof_data_t *data, | ||
| 290 | malloc_mutex_t *mutex) { | ||
| 291 | mutex_prof_data_t *source = &mutex->prof_data; | ||
| 292 | /* Can only read holding the mutex. */ | ||
| 293 | malloc_mutex_assert_owner(tsdn, mutex); | ||
| 294 | |||
| 295 | if (nstime_compare(&source->tot_wait_time, &data->tot_wait_time) > 0) { | ||
| 296 | nstime_copy(&data->tot_wait_time, &source->tot_wait_time); | ||
| 297 | } | ||
| 298 | if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) { | ||
| 299 | nstime_copy(&data->max_wait_time, &source->max_wait_time); | ||
| 300 | } | ||
| 301 | if (source->n_wait_times > data->n_wait_times) { | ||
| 302 | data->n_wait_times = source->n_wait_times; | ||
| 303 | } | ||
| 304 | if (source->n_spin_acquired > data->n_spin_acquired) { | ||
| 305 | data->n_spin_acquired = source->n_spin_acquired; | ||
| 306 | } | ||
| 307 | if (source->max_n_thds > data->max_n_thds) { | ||
| 308 | data->max_n_thds = source->max_n_thds; | ||
| 309 | } | ||
| 310 | if (source->n_owner_switches > data->n_owner_switches) { | ||
| 311 | data->n_owner_switches = source->n_owner_switches; | ||
| 312 | } | ||
| 313 | if (source->n_lock_ops > data->n_lock_ops) { | ||
| 314 | data->n_lock_ops = source->n_lock_ops; | ||
| 315 | } | ||
| 316 | /* n_wait_thds is not reported. */ | ||
| 317 | } | ||
| 318 | |||
| 319 | #endif /* JEMALLOC_INTERNAL_MUTEX_H */ | ||
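A minimal sketch of the init/lock/unlock pattern declared above, assuming a jemalloc-internal caller that already holds a tsdn_t handle; the mutex, its name, and the witness rank are illustrative:

```c
static malloc_mutex_t demo_mtx;	/* hypothetical mutex */

static bool
demo_mtx_boot(void) {
	/* malloc_mutex_init() returns true on error. */
	return malloc_mutex_init(&demo_mtx, "demo", WITNESS_RANK_OMIT,
	    malloc_mutex_rank_exclusive);
}

static void
demo_critical_section(tsdn_t *tsdn) {
	malloc_mutex_lock(tsdn, &demo_mtx);
	/* ... touch state protected by demo_mtx ... */
	malloc_mutex_unlock(tsdn, &demo_mtx);
}
```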
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/mutex_prof.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/mutex_prof.h deleted file mode 100644 index 4a526a5..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/mutex_prof.h +++ /dev/null | |||
| @@ -1,117 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H | ||
| 2 | #define JEMALLOC_INTERNAL_MUTEX_PROF_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/atomic.h" | ||
| 5 | #include "jemalloc/internal/nstime.h" | ||
| 6 | #include "jemalloc/internal/tsd_types.h" | ||
| 7 | |||
| 8 | #define MUTEX_PROF_GLOBAL_MUTEXES \ | ||
| 9 | OP(background_thread) \ | ||
| 10 | OP(max_per_bg_thd) \ | ||
| 11 | OP(ctl) \ | ||
| 12 | OP(prof) \ | ||
| 13 | OP(prof_thds_data) \ | ||
| 14 | OP(prof_dump) \ | ||
| 15 | OP(prof_recent_alloc) \ | ||
| 16 | OP(prof_recent_dump) \ | ||
| 17 | OP(prof_stats) | ||
| 18 | |||
| 19 | typedef enum { | ||
| 20 | #define OP(mtx) global_prof_mutex_##mtx, | ||
| 21 | MUTEX_PROF_GLOBAL_MUTEXES | ||
| 22 | #undef OP | ||
| 23 | mutex_prof_num_global_mutexes | ||
| 24 | } mutex_prof_global_ind_t; | ||
| 25 | |||
| 26 | #define MUTEX_PROF_ARENA_MUTEXES \ | ||
| 27 | OP(large) \ | ||
| 28 | OP(extent_avail) \ | ||
| 29 | OP(extents_dirty) \ | ||
| 30 | OP(extents_muzzy) \ | ||
| 31 | OP(extents_retained) \ | ||
| 32 | OP(decay_dirty) \ | ||
| 33 | OP(decay_muzzy) \ | ||
| 34 | OP(base) \ | ||
| 35 | OP(tcache_list) \ | ||
| 36 | OP(hpa_shard) \ | ||
| 37 | OP(hpa_shard_grow) \ | ||
| 38 | OP(hpa_sec) | ||
| 39 | |||
| 40 | typedef enum { | ||
| 41 | #define OP(mtx) arena_prof_mutex_##mtx, | ||
| 42 | MUTEX_PROF_ARENA_MUTEXES | ||
| 43 | #undef OP | ||
| 44 | mutex_prof_num_arena_mutexes | ||
| 45 | } mutex_prof_arena_ind_t; | ||
| 46 | |||
| 47 | /* | ||
| 48 | * The fourth parameter is a boolean value that is true for derived rate counters | ||
| 49 | * and false for real ones. | ||
| 50 | */ | ||
| 51 | #define MUTEX_PROF_UINT64_COUNTERS \ | ||
| 52 | OP(num_ops, uint64_t, "n_lock_ops", false, num_ops) \ | ||
| 53 | OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops) \ | ||
| 54 | OP(num_wait, uint64_t, "n_waiting", false, num_wait) \ | ||
| 55 | OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait) \ | ||
| 56 | OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq) \ | ||
| 57 | OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq) \ | ||
| 58 | OP(num_owner_switch, uint64_t, "n_owner_switch", false, num_owner_switch) \ | ||
| 59 | OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \ | ||
| 60 | OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \ | ||
| 61 | OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time) \ | ||
| 62 | OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time) | ||
| 63 | |||
| 64 | #define MUTEX_PROF_UINT32_COUNTERS \ | ||
| 65 | OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds) | ||
| 66 | |||
| 67 | #define MUTEX_PROF_COUNTERS \ | ||
| 68 | MUTEX_PROF_UINT64_COUNTERS \ | ||
| 69 | MUTEX_PROF_UINT32_COUNTERS | ||
| 70 | |||
| 71 | #define OP(counter, type, human, derived, base_counter) mutex_counter_##counter, | ||
| 72 | |||
| 73 | #define COUNTER_ENUM(counter_list, t) \ | ||
| 74 | typedef enum { \ | ||
| 75 | counter_list \ | ||
| 76 | mutex_prof_num_##t##_counters \ | ||
| 77 | } mutex_prof_##t##_counter_ind_t; | ||
| 78 | |||
| 79 | COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t) | ||
| 80 | COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t) | ||
| 81 | |||
| 82 | #undef COUNTER_ENUM | ||
| 83 | #undef OP | ||
| 84 | |||
| 85 | typedef struct { | ||
| 86 | /* | ||
| 87 | * Counters touched on the slow path, i.e. when there is lock | ||
| 88 | * contention. We update them once we have the lock. | ||
| 89 | */ | ||
| 90 | /* Total time (in nanoseconds) spent waiting on this mutex. */ | ||
| 91 | nstime_t tot_wait_time; | ||
| 92 | /* Max time (in nanoseconds) spent on a single lock operation. */ | ||
| 93 | nstime_t max_wait_time; | ||
| 94 | /* # of times have to wait for this mutex (after spinning). */ | ||
| 95 | uint64_t n_wait_times; | ||
| 96 | /* # of times acquired the mutex through local spinning. */ | ||
| 97 | uint64_t n_spin_acquired; | ||
| 98 | /* Max # of threads waiting for the mutex at the same time. */ | ||
| 99 | uint32_t max_n_thds; | ||
| 100 | /* Current # of threads waiting on the lock. Atomic synced. */ | ||
| 101 | atomic_u32_t n_waiting_thds; | ||
| 102 | |||
| 103 | /* | ||
| 104 | * Data touched on the fast path. These are modified right after we | ||
| 105 | * grab the lock, so it's placed closest to the end (i.e. right before | ||
| 106 | * the lock) so that we have a higher chance of them being on the same | ||
| 107 | * cacheline. | ||
| 108 | */ | ||
| 109 | /* # of times the mutex holder is different than the previous one. */ | ||
| 110 | uint64_t n_owner_switches; | ||
| 111 | /* Previous mutex holder, to facilitate n_owner_switches. */ | ||
| 112 | tsdn_t *prev_owner; | ||
| 113 | /* # of lock() operations in total. */ | ||
| 114 | uint64_t n_lock_ops; | ||
| 115 | } mutex_prof_data_t; | ||
| 116 | |||
| 117 | #endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */ | ||
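For illustration only, this is the (abridged) result of expanding the OP x-macro over MUTEX_PROF_ARENA_MUTEXES above; no new definitions are introduced here:

```c
typedef enum {
	arena_prof_mutex_large,
	arena_prof_mutex_extent_avail,
	/* ... one enumerator per OP(...) entry ... */
	arena_prof_mutex_hpa_sec,
	mutex_prof_num_arena_mutexes
} mutex_prof_arena_ind_t;
```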
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/nstime.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/nstime.h deleted file mode 100644 index 486e5cc..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/nstime.h +++ /dev/null | |||
| @@ -1,73 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_NSTIME_H | ||
| 2 | #define JEMALLOC_INTERNAL_NSTIME_H | ||
| 3 | |||
| 4 | /* Maximum supported number of seconds (~584 years). */ | ||
| 5 | #define NSTIME_SEC_MAX KQU(18446744072) | ||
| 6 | |||
| 7 | #define NSTIME_MAGIC ((uint32_t)0xb8a9ce37) | ||
| 8 | #ifdef JEMALLOC_DEBUG | ||
| 9 | # define NSTIME_ZERO_INITIALIZER {0, NSTIME_MAGIC} | ||
| 10 | #else | ||
| 11 | # define NSTIME_ZERO_INITIALIZER {0} | ||
| 12 | #endif | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | uint64_t ns; | ||
| 16 | #ifdef JEMALLOC_DEBUG | ||
| 17 | uint32_t magic; /* Tracks if initialized. */ | ||
| 18 | #endif | ||
| 19 | } nstime_t; | ||
| 20 | |||
| 21 | static const nstime_t nstime_zero = NSTIME_ZERO_INITIALIZER; | ||
| 22 | |||
| 23 | void nstime_init(nstime_t *time, uint64_t ns); | ||
| 24 | void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); | ||
| 25 | uint64_t nstime_ns(const nstime_t *time); | ||
| 26 | uint64_t nstime_sec(const nstime_t *time); | ||
| 27 | uint64_t nstime_msec(const nstime_t *time); | ||
| 28 | uint64_t nstime_nsec(const nstime_t *time); | ||
| 29 | void nstime_copy(nstime_t *time, const nstime_t *source); | ||
| 30 | int nstime_compare(const nstime_t *a, const nstime_t *b); | ||
| 31 | void nstime_add(nstime_t *time, const nstime_t *addend); | ||
| 32 | void nstime_iadd(nstime_t *time, uint64_t addend); | ||
| 33 | void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); | ||
| 34 | void nstime_isubtract(nstime_t *time, uint64_t subtrahend); | ||
| 35 | void nstime_imultiply(nstime_t *time, uint64_t multiplier); | ||
| 36 | void nstime_idivide(nstime_t *time, uint64_t divisor); | ||
| 37 | uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor); | ||
| 38 | uint64_t nstime_ns_since(const nstime_t *past); | ||
| 39 | |||
| 40 | typedef bool (nstime_monotonic_t)(void); | ||
| 41 | extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic; | ||
| 42 | |||
| 43 | typedef void (nstime_update_t)(nstime_t *); | ||
| 44 | extern nstime_update_t *JET_MUTABLE nstime_update; | ||
| 45 | |||
| 46 | typedef void (nstime_prof_update_t)(nstime_t *); | ||
| 47 | extern nstime_prof_update_t *JET_MUTABLE nstime_prof_update; | ||
| 48 | |||
| 49 | void nstime_init_update(nstime_t *time); | ||
| 50 | void nstime_prof_init_update(nstime_t *time); | ||
| 51 | |||
| 52 | enum prof_time_res_e { | ||
| 53 | prof_time_res_default = 0, | ||
| 54 | prof_time_res_high = 1 | ||
| 55 | }; | ||
| 56 | typedef enum prof_time_res_e prof_time_res_t; | ||
| 57 | |||
| 58 | extern prof_time_res_t opt_prof_time_res; | ||
| 59 | extern const char *prof_time_res_mode_names[]; | ||
| 60 | |||
| 61 | JEMALLOC_ALWAYS_INLINE void | ||
| 62 | nstime_init_zero(nstime_t *time) { | ||
| 63 | nstime_copy(time, &nstime_zero); | ||
| 64 | } | ||
| 65 | |||
| 66 | JEMALLOC_ALWAYS_INLINE bool | ||
| 67 | nstime_equals_zero(nstime_t *time) { | ||
| 68 | int diff = nstime_compare(time, &nstime_zero); | ||
| 69 | assert(diff >= 0); | ||
| 70 | return diff == 0; | ||
| 71 | } | ||
| 72 | |||
| 73 | #endif /* JEMALLOC_INTERNAL_NSTIME_H */ | ||
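A short sketch of the timing pattern these declarations support; the function and variable names are illustrative:

```c
static uint64_t
time_some_work_hypothetical(void) {
	nstime_t start;
	nstime_init_update(&start);	/* initialize and read the clock */
	/* ... the work being measured ... */
	return nstime_ns_since(&start);	/* elapsed nanoseconds */
}
```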
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/pa.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/pa.h deleted file mode 100644 index 4748a05..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/pa.h +++ /dev/null | |||
| @@ -1,243 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PA_H | ||
| 2 | #define JEMALLOC_INTERNAL_PA_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/base.h" | ||
| 5 | #include "jemalloc/internal/decay.h" | ||
| 6 | #include "jemalloc/internal/ecache.h" | ||
| 7 | #include "jemalloc/internal/edata_cache.h" | ||
| 8 | #include "jemalloc/internal/emap.h" | ||
| 9 | #include "jemalloc/internal/hpa.h" | ||
| 10 | #include "jemalloc/internal/lockedint.h" | ||
| 11 | #include "jemalloc/internal/pac.h" | ||
| 12 | #include "jemalloc/internal/pai.h" | ||
| 13 | #include "jemalloc/internal/sec.h" | ||
| 14 | |||
| 15 | /* | ||
| 16 | * The page allocator; responsible for acquiring pages of memory for | ||
| 17 | * allocations. It picks the implementation of the page allocator interface | ||
| 18 | * (i.e. a pai_t) to handle a given page-level allocation request. For now, the | ||
| 19 | * only such implementation is the PAC code ("page allocator classic"), but | ||
| 20 | * others will be coming soon. | ||
| 21 | */ | ||
| 22 | |||
| 23 | typedef struct pa_central_s pa_central_t; | ||
| 24 | struct pa_central_s { | ||
| 25 | hpa_central_t hpa; | ||
| 26 | }; | ||
| 27 | |||
| 28 | /* | ||
| 29 | * The stats for a particular pa_shard. Because of the way the ctl module | ||
| 30 | * handles stats epoch data collection (it has its own arena_stats, and merges | ||
| 31 | * the stats from each arena into it), this needs to live in the arena_stats_t; | ||
| 32 | * hence we define it here and let the pa_shard have a pointer (rather than the | ||
| 33 | * more natural approach of just embedding it in the pa_shard itself). | ||
| 34 | * | ||
| 35 | * We follow the arena_stats_t approach of marking the derived fields. These | ||
| 36 | * are the ones that are not maintained on their own; instead, their values are | ||
| 37 | * derived during those stats merges. | ||
| 38 | */ | ||
| 39 | typedef struct pa_shard_stats_s pa_shard_stats_t; | ||
| 40 | struct pa_shard_stats_s { | ||
| 41 | /* Number of edata_t structs allocated by base, but not being used. */ | ||
| 42 | size_t edata_avail; /* Derived. */ | ||
| 43 | /* | ||
| 44 | * Stats specific to the PAC. For now, these are the only stats that | ||
| 45 | * exist, but there will eventually be other page allocators. Things | ||
| 46 | * like edata_avail make sense in a cross-PA sense, but things like | ||
| 47 | * npurges don't. | ||
| 48 | */ | ||
| 49 | pac_stats_t pac_stats; | ||
| 50 | }; | ||
| 51 | |||
| 52 | /* | ||
| 53 | * The local allocator handle. Keeps the state necessary to satisfy page-sized | ||
| 54 | * allocations. | ||
| 55 | * | ||
| 56 | * The contents are mostly internal to the PA module. The key exception is that | ||
| 57 | * arena decay code is allowed to grab pointers to the dirty and muzzy ecaches | ||
| 58 | * decay_ts, for a couple of queries, passing them back to a PA function, or | ||
| 59 | * acquiring decay.mtx and looking at decay.purging. The reasoning is that, | ||
| 60 | * while PA decides what and how to purge, the arena code decides when and where | ||
| 61 | * (e.g. on what thread). It's allowed to use the presence of another purger to | ||
| 62 | * decide. | ||
| 63 | * (The background thread code also touches some other decay internals, but | ||
| 64 | * that's not fundamental; it's just an artifact of a partial refactoring, and | ||
| 65 | * its accesses could be straightforwardly moved inside the decay module). | ||
| 66 | */ | ||
| 67 | typedef struct pa_shard_s pa_shard_t; | ||
| 68 | struct pa_shard_s { | ||
| 69 | /* The central PA this shard is associated with. */ | ||
| 70 | pa_central_t *central; | ||
| 71 | |||
| 72 | /* | ||
| 73 | * Number of pages in active extents. | ||
| 74 | * | ||
| 75 | * Synchronization: atomic. | ||
| 76 | */ | ||
| 77 | atomic_zu_t nactive; | ||
| 78 | |||
| 79 | /* | ||
| 80 | * Whether or not we should prefer the hugepage allocator. Atomic since | ||
| 81 | * it may be concurrently modified by a thread setting extent hooks. | ||
| 82 | * Note that we still may do HPA operations in this arena; if use_hpa is | ||
| 83 | * changed from true to false, we'll free back to the hugepage allocator | ||
| 84 | * for those allocations. | ||
| 85 | */ | ||
| 86 | atomic_b_t use_hpa; | ||
| 87 | |||
| 88 | /* | ||
| 89 | * If we never used the HPA to begin with, it wasn't initialized, and so | ||
| 90 | * we shouldn't try to e.g. acquire its mutexes during fork. This | ||
| 91 | * tracks that knowledge. | ||
| 92 | */ | ||
| 93 | bool ever_used_hpa; | ||
| 94 | |||
| 95 | /* Allocates from a PAC. */ | ||
| 96 | pac_t pac; | ||
| 97 | |||
| 98 | /* | ||
| 99 | * We place a small extent cache in front of the HPA, since we intend | ||
| 100 | * these configurations to use many fewer arenas, and therefore have a | ||
| 101 | * higher risk of hot locks. | ||
| 102 | */ | ||
| 103 | sec_t hpa_sec; | ||
| 104 | hpa_shard_t hpa_shard; | ||
| 105 | |||
| 106 | /* The source of edata_t objects. */ | ||
| 107 | edata_cache_t edata_cache; | ||
| 108 | |||
| 109 | unsigned ind; | ||
| 110 | |||
| 111 | malloc_mutex_t *stats_mtx; | ||
| 112 | pa_shard_stats_t *stats; | ||
| 113 | |||
| 114 | /* The emap this shard is tied to. */ | ||
| 115 | emap_t *emap; | ||
| 116 | |||
| 117 | /* The base from which we get the ehooks and allocate metadata. */ | ||
| 118 | base_t *base; | ||
| 119 | }; | ||
| 120 | |||
| 121 | static inline bool | ||
| 122 | pa_shard_dont_decay_muzzy(pa_shard_t *shard) { | ||
| 123 | return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 && | ||
| 124 | pac_decay_ms_get(&shard->pac, extent_state_muzzy) <= 0; | ||
| 125 | } | ||
| 126 | |||
| 127 | static inline ehooks_t * | ||
| 128 | pa_shard_ehooks_get(pa_shard_t *shard) { | ||
| 129 | return base_ehooks_get(shard->base); | ||
| 130 | } | ||
| 131 | |||
| 132 | /* Returns true on error. */ | ||
| 133 | bool pa_central_init(pa_central_t *central, base_t *base, bool hpa, | ||
| 134 | hpa_hooks_t *hpa_hooks); | ||
| 135 | |||
| 136 | /* Returns true on error. */ | ||
| 137 | bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central, | ||
| 138 | emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats, | ||
| 139 | malloc_mutex_t *stats_mtx, nstime_t *cur_time, size_t oversize_threshold, | ||
| 140 | ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms); | ||
| 141 | |||
| 142 | /* | ||
| 143 | * This isn't exposed to users; we allow late enablement of the HPA shard so | ||
| 144 | * that we can boot without worrying about the HPA, then turn it on in a0. | ||
| 145 | */ | ||
| 146 | bool pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard, | ||
| 147 | const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts); | ||
| 148 | |||
| 149 | /* | ||
| 150 | * We stop using the HPA when custom extent hooks are installed, but still | ||
| 151 | * redirect deallocations to it. | ||
| 152 | */ | ||
| 153 | void pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard); | ||
| 154 | |||
| 155 | /* | ||
| 156 | * This does the PA-specific parts of arena reset (i.e. freeing all active | ||
| 157 | * allocations). | ||
| 158 | */ | ||
| 159 | void pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard); | ||
| 160 | |||
| 161 | /* | ||
| 162 | * Destroy all the remaining retained extents. Should only be called after | ||
| 163 | * decaying all active, dirty, and muzzy extents to the retained state, as the | ||
| 164 | * last step in destroying the shard. | ||
| 165 | */ | ||
| 166 | void pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard); | ||
| 167 | |||
| 168 | /* Gets an edata for the given allocation. */ | ||
| 169 | edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, | ||
| 170 | size_t alignment, bool slab, szind_t szind, bool zero, bool guarded, | ||
| 171 | bool *deferred_work_generated); | ||
| 172 | /* Returns true on error, in which case nothing changed. */ | ||
| 173 | bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size, | ||
| 174 | size_t new_size, szind_t szind, bool zero, bool *deferred_work_generated); | ||
| 175 | /* | ||
| 176 | * The same. Sets *generated_dirty to true if we produced new dirty pages, and | ||
| 177 | * false otherwise. | ||
| 178 | */ | ||
| 179 | bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size, | ||
| 180 | size_t new_size, szind_t szind, bool *deferred_work_generated); | ||
| 181 | /* | ||
| 182 | * Frees the given edata back to the pa. Sets *generated_dirty if we produced | ||
| 183 | * new dirty pages (well, we always set it for now; but this need not be the | ||
| 184 | * case). | ||
| 185 | * (We could make generated_dirty the return value of course, but this is more | ||
| 186 | * consistent with the shrink pathway and our error codes here). | ||
| 187 | */ | ||
| 188 | void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, | ||
| 189 | bool *deferred_work_generated); | ||
| 190 | bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state, | ||
| 191 | ssize_t decay_ms, pac_purge_eagerness_t eagerness); | ||
| 192 | ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state); | ||
| 193 | |||
| 194 | /* | ||
| 195 | * Do deferred work on this PA shard. | ||
| 196 | * | ||
| 197 | * Morally, this should do both PAC decay and the HPA deferred work. For now, | ||
| 198 | * though, the arena, background thread, and PAC modules are tightly interwoven | ||
| 199 | * in a way that's tricky to extricate, so we only do the HPA-specific parts. | ||
| 200 | */ | ||
| 201 | void pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard, | ||
| 202 | bool deferral_allowed); | ||
| 203 | void pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard); | ||
| 204 | void pa_shard_try_deferred_work(tsdn_t *tsdn, pa_shard_t *shard); | ||
| 205 | uint64_t pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard); | ||
| 206 | |||
| 207 | /******************************************************************************/ | ||
| 208 | /* | ||
| 209 | * Various bits of "boring" functionality that are still part of this module, | ||
| 210 | * but that we relegate to pa_extra.c, to keep the core logic in pa.c as | ||
| 211 | * readable as possible. | ||
| 212 | */ | ||
| 213 | |||
| 214 | /* | ||
| 215 | * These fork phases are synchronized with the arena fork phase numbering to | ||
| 216 | * make it easy to keep straight. That's why there's no prefork1. | ||
| 217 | */ | ||
| 218 | void pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard); | ||
| 219 | void pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard); | ||
| 220 | void pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard); | ||
| 221 | void pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard); | ||
| 222 | void pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard); | ||
| 223 | void pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard); | ||
| 224 | void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard); | ||
| 225 | |||
| 226 | void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, | ||
| 227 | size_t *ndirty, size_t *nmuzzy); | ||
| 228 | |||
| 229 | void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard, | ||
| 230 | pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out, | ||
| 231 | hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out, | ||
| 232 | size_t *resident); | ||
| 233 | |||
| 234 | /* | ||
| 235 | * Reads the PA-owned mutex stats into the output stats array, at the | ||
| 236 | * appropriate positions. Morally, these stats should really live in | ||
| 237 | * pa_shard_stats_t, but the indices are sort of baked into the various mutex | ||
| 238 | * prof macros. This would be a good thing to do at some point. | ||
| 239 | */ | ||
| 240 | void pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard, | ||
| 241 | mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]); | ||
| 242 | |||
| 243 | #endif /* JEMALLOC_INTERNAL_PA_H */ | ||
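A hedged sketch of the pa_alloc()/pa_dalloc() pair declared above; the size, alignment, and flag values are illustrative, and the caller is assumed to already have a tsdn_t and a pa_shard_t:

```c
static void
pa_roundtrip_hypothetical(tsdn_t *tsdn, pa_shard_t *shard, szind_t szind) {
	bool deferred_work_generated = false;
	/* One page, page-aligned, non-slab, unzeroed, unguarded. */
	edata_t *edata = pa_alloc(tsdn, shard, PAGE, PAGE, /* slab */ false,
	    szind, /* zero */ false, /* guarded */ false,
	    &deferred_work_generated);
	if (edata == NULL) {
		return;	/* allocation failed */
	}
	/* ... use the extent ... */
	pa_dalloc(tsdn, shard, edata, &deferred_work_generated);
}
```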
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/pac.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/pac.h deleted file mode 100644 index 01c4e6a..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/pac.h +++ /dev/null | |||
| @@ -1,179 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PAC_H | ||
| 2 | #define JEMALLOC_INTERNAL_PAC_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/exp_grow.h" | ||
| 5 | #include "jemalloc/internal/pai.h" | ||
| 6 | #include "san_bump.h" | ||
| 7 | |||
| 8 | |||
| 9 | /* | ||
| 10 | * Page allocator classic; an implementation of the PAI interface that: | ||
| 11 | * - Can be used for arenas with custom extent hooks. | ||
| 12 | * - Can always satisfy any allocation request (including highly-fragmentary | ||
| 13 | * ones). | ||
| 14 | * - Can use efficient OS-level zeroing primitives for demand-filled pages. | ||
| 15 | */ | ||
| 16 | |||
| 17 | /* How "eager" decay/purging should be. */ | ||
| 18 | enum pac_purge_eagerness_e { | ||
| 19 | PAC_PURGE_ALWAYS, | ||
| 20 | PAC_PURGE_NEVER, | ||
| 21 | PAC_PURGE_ON_EPOCH_ADVANCE | ||
| 22 | }; | ||
| 23 | typedef enum pac_purge_eagerness_e pac_purge_eagerness_t; | ||
| 24 | |||
| 25 | typedef struct pac_decay_stats_s pac_decay_stats_t; | ||
| 26 | struct pac_decay_stats_s { | ||
| 27 | /* Total number of purge sweeps. */ | ||
| 28 | locked_u64_t npurge; | ||
| 29 | /* Total number of madvise calls made. */ | ||
| 30 | locked_u64_t nmadvise; | ||
| 31 | /* Total number of pages purged. */ | ||
| 32 | locked_u64_t purged; | ||
| 33 | }; | ||
| 34 | |||
| 35 | typedef struct pac_estats_s pac_estats_t; | ||
| 36 | struct pac_estats_s { | ||
| 37 | /* | ||
| 38 | * Stats for a given index in the range [0, SC_NPSIZES] in the various | ||
| 39 | * ecache_ts. | ||
| 40 | * We track both bytes and # of extents: two extents in the same bucket | ||
| 41 | * may have different sizes if adjacent size classes differ by more than | ||
| 42 | * a page, so bytes cannot always be derived from # of extents. | ||
| 43 | */ | ||
| 44 | size_t ndirty; | ||
| 45 | size_t dirty_bytes; | ||
| 46 | size_t nmuzzy; | ||
| 47 | size_t muzzy_bytes; | ||
| 48 | size_t nretained; | ||
| 49 | size_t retained_bytes; | ||
| 50 | }; | ||
| 51 | |||
| 52 | typedef struct pac_stats_s pac_stats_t; | ||
| 53 | struct pac_stats_s { | ||
| 54 | pac_decay_stats_t decay_dirty; | ||
| 55 | pac_decay_stats_t decay_muzzy; | ||
| 56 | |||
| 57 | /* | ||
| 58 | * Number of unused virtual memory bytes currently retained. Retained | ||
| 59 | * bytes are technically mapped (though always decommitted or purged), | ||
| 60 | * but they are excluded from the mapped statistic (above). | ||
| 61 | */ | ||
| 62 | size_t retained; /* Derived. */ | ||
| 63 | |||
| 64 | /* | ||
| 65 | * Number of bytes currently mapped, excluding retained memory (and any | ||
| 66 | * base-allocated memory, which is tracked by the arena stats). | ||
| 67 | * | ||
| 68 | * We name this "pac_mapped" to avoid confusion with the arena_stats | ||
| 69 | * "mapped". | ||
| 70 | */ | ||
| 71 | atomic_zu_t pac_mapped; | ||
| 72 | |||
| 73 | /* VM space had to be leaked (undocumented). Normally 0. */ | ||
| 74 | atomic_zu_t abandoned_vm; | ||
| 75 | }; | ||
| 76 | |||
| 77 | typedef struct pac_s pac_t; | ||
| 78 | struct pac_s { | ||
| 79 | /* | ||
| 80 | * Must be the first member (we convert it to a PAC given only a | ||
| 81 | * pointer). The handle to the allocation interface. | ||
| 82 | */ | ||
| 83 | pai_t pai; | ||
| 84 | /* | ||
| 85 | * Collections of extents that were previously allocated. These are | ||
| 86 | * used when allocating extents, in an attempt to re-use address space. | ||
| 87 | * | ||
| 88 | * Synchronization: internal. | ||
| 89 | */ | ||
| 90 | ecache_t ecache_dirty; | ||
| 91 | ecache_t ecache_muzzy; | ||
| 92 | ecache_t ecache_retained; | ||
| 93 | |||
| 94 | base_t *base; | ||
| 95 | emap_t *emap; | ||
| 96 | edata_cache_t *edata_cache; | ||
| 97 | |||
| 98 | /* The grow info for the retained ecache. */ | ||
| 99 | exp_grow_t exp_grow; | ||
| 100 | malloc_mutex_t grow_mtx; | ||
| 101 | |||
| 102 | /* Special allocator for guarded frequently reused extents. */ | ||
| 103 | san_bump_alloc_t sba; | ||
| 104 | |||
| 105 | /* How large extents should be before getting auto-purged. */ | ||
| 106 | atomic_zu_t oversize_threshold; | ||
| 107 | |||
| 108 | /* | ||
| 109 | * Decay-based purging state, responsible for scheduling extent state | ||
| 110 | * transitions. | ||
| 111 | * | ||
| 112 | * Synchronization: via the internal mutex. | ||
| 113 | */ | ||
| 114 | decay_t decay_dirty; /* dirty --> muzzy */ | ||
| 115 | decay_t decay_muzzy; /* muzzy --> retained */ | ||
| 116 | |||
| 117 | malloc_mutex_t *stats_mtx; | ||
| 118 | pac_stats_t *stats; | ||
| 119 | |||
| 120 | /* Extent serial number generator state. */ | ||
| 121 | atomic_zu_t extent_sn_next; | ||
| 122 | }; | ||
| 123 | |||
| 124 | bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap, | ||
| 125 | edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold, | ||
| 126 | ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, | ||
| 127 | malloc_mutex_t *stats_mtx); | ||
| 128 | |||
| 129 | static inline size_t | ||
| 130 | pac_mapped(pac_t *pac) { | ||
| 131 | return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED); | ||
| 132 | } | ||
| 133 | |||
| 134 | static inline ehooks_t * | ||
| 135 | pac_ehooks_get(pac_t *pac) { | ||
| 136 | return base_ehooks_get(pac->base); | ||
| 137 | } | ||
| 138 | |||
| 139 | /* | ||
| 140 | * All purging functions require holding decay->mtx. This is one of the few | ||
| 141 | * places external modules are allowed to peek inside pa_shard_t internals. | ||
| 142 | */ | ||
| 143 | |||
| 144 | /* | ||
| 145 | * Decays the number of pages currently in the ecache. This might not leave the | ||
| 146 | * ecache empty if other threads are inserting dirty objects into it | ||
| 147 | * concurrently with the call. | ||
| 148 | */ | ||
| 149 | void pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay, | ||
| 150 | pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay); | ||
| 151 | /* | ||
| 152 | * Updates decay settings for the current time, and conditionally purges in | ||
| 153 | * response (depending on decay_purge_setting). Returns whether or not the | ||
| 154 | * epoch advanced. | ||
| 155 | */ | ||
| 156 | bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay, | ||
| 157 | pac_decay_stats_t *decay_stats, ecache_t *ecache, | ||
| 158 | pac_purge_eagerness_t eagerness); | ||
| 159 | |||
| 160 | /* | ||
| 161 | * Gets / sets the maximum amount that we'll grow an arena down the | ||
| 162 | * grow-retained pathways (unless forced to by an allocation request). | ||
| 163 | * | ||
| 164 | * Set new_limit to NULL if it's just a query, or old_limit to NULL if you don't | ||
| 165 | * care about the previous value. | ||
| 166 | * | ||
| 167 | * Returns true on error (if the new limit is not valid). | ||
| 168 | */ | ||
| 169 | bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit, | ||
| 170 | size_t *new_limit); | ||
| 171 | |||
| 172 | bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state, | ||
| 173 | ssize_t decay_ms, pac_purge_eagerness_t eagerness); | ||
| 174 | ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state); | ||
| 175 | |||
| 176 | void pac_reset(tsdn_t *tsdn, pac_t *pac); | ||
| 177 | void pac_destroy(tsdn_t *tsdn, pac_t *pac); | ||
| 178 | |||
| 179 | #endif /* JEMALLOC_INTERNAL_PAC_H */ | ||
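A sketch of the query/set protocol for pac_retain_grow_limit_get_set() described above; the new limit value is illustrative:

```c
static void
grow_limit_demo_hypothetical(tsdn_t *tsdn, pac_t *pac) {
	size_t old_limit;
	/* Query only: pass NULL for new_limit. */
	pac_retain_grow_limit_get_set(tsdn, pac, &old_limit, NULL);

	/* Set a new limit; the call returns true if the limit is invalid. */
	size_t new_limit = 64 * HUGEPAGE;	/* illustrative value */
	if (pac_retain_grow_limit_get_set(tsdn, pac, NULL, &new_limit)) {
		/* New limit rejected; the old limit remains in effect. */
	}
}
```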
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/pages.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/pages.h deleted file mode 100644 index ad1f606..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/pages.h +++ /dev/null | |||
| @@ -1,119 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H | ||
| 2 | #define JEMALLOC_INTERNAL_PAGES_EXTERNS_H | ||
| 3 | |||
| 4 | /* Page size. LG_PAGE is determined by the configure script. */ | ||
| 5 | #ifdef PAGE_MASK | ||
| 6 | # undef PAGE_MASK | ||
| 7 | #endif | ||
| 8 | #define PAGE ((size_t)(1U << LG_PAGE)) | ||
| 9 | #define PAGE_MASK ((size_t)(PAGE - 1)) | ||
| 10 | /* Return the page base address for the page containing address a. */ | ||
| 11 | #define PAGE_ADDR2BASE(a) \ | ||
| 12 | ((void *)((uintptr_t)(a) & ~PAGE_MASK)) | ||
| 13 | /* Return the smallest pagesize multiple that is >= s. */ | ||
| 14 | #define PAGE_CEILING(s) \ | ||
| 15 | (((s) + PAGE_MASK) & ~PAGE_MASK) | ||
| 16 | /* Return the largest pagesize multiple that is <=s. */ | ||
| 17 | #define PAGE_FLOOR(s) \ | ||
| 18 | ((s) & ~PAGE_MASK) | ||
| 19 | |||
| 20 | /* Huge page size. LG_HUGEPAGE is determined by the configure script. */ | ||
| 21 | #define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE)) | ||
| 22 | #define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1)) | ||
| 23 | |||
| 24 | #if LG_HUGEPAGE != 0 | ||
| 25 | # define HUGEPAGE_PAGES (HUGEPAGE / PAGE) | ||
| 26 | #else | ||
| 27 | /* | ||
| 28 | * It's convenient to define arrays (or bitmaps) of HUGEPAGE_PAGES lengths. If | ||
| 29 | * we can't autodetect the hugepage size, it gets treated as 0, in which case | ||
| 30 | * we'll trigger a compiler error in those arrays. Avoid this case by ensuring | ||
| 31 | * that this value is at least 1. (We won't ever run in this degraded state; | ||
| 32 | * hpa_supported() returns false in this case.) | ||
| 33 | */ | ||
| 34 | # define HUGEPAGE_PAGES 1 | ||
| 35 | #endif | ||
| 36 | |||
| 37 | /* Return the huge page base address for the huge page containing address a. */ | ||
| 38 | #define HUGEPAGE_ADDR2BASE(a) \ | ||
| 39 | ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK)) | ||
| 40 | /* Return the smallest huge page size multiple that is >= s. */ | ||
| 41 | #define HUGEPAGE_CEILING(s) \ | ||
| 42 | (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK) | ||
| 43 | |||
| 44 | /* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */ | ||
| 45 | #if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE) | ||
| 46 | # define PAGES_CAN_PURGE_LAZY | ||
| 47 | #endif | ||
| 48 | /* | ||
| 49 | * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported. | ||
| 50 | * | ||
| 51 | * The only supported way to hard-purge on Windows is to decommit and then | ||
| 52 | * re-commit, but doing so is racy, and if re-commit fails it's a pain to | ||
| 53 | * propagate the "poisoned" memory state. Since we typically decommit as the | ||
| 54 | * next step after purging on Windows anyway, there's no point in adding such | ||
| 55 | * complexity. | ||
| 56 | */ | ||
| 57 | #if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ | ||
| 58 | defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \ | ||
| 59 | defined(JEMALLOC_MAPS_COALESCE)) | ||
| 60 | # define PAGES_CAN_PURGE_FORCED | ||
| 61 | #endif | ||
| 62 | |||
| 63 | static const bool pages_can_purge_lazy = | ||
| 64 | #ifdef PAGES_CAN_PURGE_LAZY | ||
| 65 | true | ||
| 66 | #else | ||
| 67 | false | ||
| 68 | #endif | ||
| 69 | ; | ||
| 70 | static const bool pages_can_purge_forced = | ||
| 71 | #ifdef PAGES_CAN_PURGE_FORCED | ||
| 72 | true | ||
| 73 | #else | ||
| 74 | false | ||
| 75 | #endif | ||
| 76 | ; | ||
| 77 | |||
| 78 | #if defined(JEMALLOC_HAVE_MADVISE_HUGE) || defined(JEMALLOC_HAVE_MEMCNTL) | ||
| 79 | # define PAGES_CAN_HUGIFY | ||
| 80 | #endif | ||
| 81 | |||
| 82 | static const bool pages_can_hugify = | ||
| 83 | #ifdef PAGES_CAN_HUGIFY | ||
| 84 | true | ||
| 85 | #else | ||
| 86 | false | ||
| 87 | #endif | ||
| 88 | ; | ||
| 89 | |||
| 90 | typedef enum { | ||
| 91 | thp_mode_default = 0, /* Do not change hugepage settings. */ | ||
| 92 | thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */ | ||
| 93 | thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */ | ||
| 94 | |||
| 95 | thp_mode_names_limit = 3, /* Used for option processing. */ | ||
| 96 | thp_mode_not_supported = 3 /* No THP support detected. */ | ||
| 97 | } thp_mode_t; | ||
| 98 | |||
| 99 | #define THP_MODE_DEFAULT thp_mode_default | ||
| 100 | extern thp_mode_t opt_thp; | ||
| 101 | extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */ | ||
| 102 | extern const char *thp_mode_names[]; | ||
| 103 | |||
| 104 | void *pages_map(void *addr, size_t size, size_t alignment, bool *commit); | ||
| 105 | void pages_unmap(void *addr, size_t size); | ||
| 106 | bool pages_commit(void *addr, size_t size); | ||
| 107 | bool pages_decommit(void *addr, size_t size); | ||
| 108 | bool pages_purge_lazy(void *addr, size_t size); | ||
| 109 | bool pages_purge_forced(void *addr, size_t size); | ||
| 110 | bool pages_huge(void *addr, size_t size); | ||
| 111 | bool pages_nohuge(void *addr, size_t size); | ||
| 112 | bool pages_dontdump(void *addr, size_t size); | ||
| 113 | bool pages_dodump(void *addr, size_t size); | ||
| 114 | bool pages_boot(void); | ||
| 115 | void pages_set_thp_state(void *ptr, size_t size); | ||
| 116 | void pages_mark_guards(void *head, void *tail); | ||
| 117 | void pages_unmark_guards(void *head, void *tail); | ||
| 118 | |||
| 119 | #endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */ | ||
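As a quick sanity check of the masking arithmetic behind PAGE_ADDR2BASE, PAGE_CEILING, and PAGE_FLOOR, the following standalone sketch assumes LG_PAGE == 12 (4 KiB pages); LG_PAGE is normally supplied by the configure script, so the local defines here are purely illustrative stand-ins for the header above.

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-ins for the configure-time constant, assuming 4 KiB pages. */
#define LG_PAGE 12
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
#define PAGE_ADDR2BASE(a) ((void *)((uintptr_t)(a) & ~PAGE_MASK))
#define PAGE_CEILING(s) (((s) + PAGE_MASK) & ~PAGE_MASK)
#define PAGE_FLOOR(s) ((s) & ~PAGE_MASK)

int
main(void) {
	/* 5000 bytes straddles the first 4096-byte boundary. */
	assert(PAGE_CEILING((size_t)5000) == 8192); /* round up to the next page */
	assert(PAGE_FLOOR((size_t)5000) == 4096);   /* round down to the page base */
	/* An address 0x64 bytes into a page maps back to that page's base. */
	assert(PAGE_ADDR2BASE((void *)(uintptr_t)0x1064) == (void *)(uintptr_t)0x1000);
	return 0;
}
```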
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/pai.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/pai.h deleted file mode 100644 index d978cd7..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/pai.h +++ /dev/null | |||
| @@ -1,95 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PAI_H | ||
| 2 | #define JEMALLOC_INTERNAL_PAI_H | ||
| 3 | |||
| 4 | /* An interface for page allocation. */ | ||
| 5 | |||
| 6 | typedef struct pai_s pai_t; | ||
| 7 | struct pai_s { | ||
| 8 | /* Returns NULL on failure. */ | ||
| 9 | edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size, | ||
| 10 | size_t alignment, bool zero, bool guarded, bool frequent_reuse, | ||
| 11 | bool *deferred_work_generated); | ||
| 12 | /* | ||
| 13 | * Returns the number of extents added to the list (which may be fewer | ||
| 14 | * than requested, in case of OOM). The list should already be | ||
| 15 | * initialized. The only alignment guarantee is page-alignment, and | ||
| 16 | * the results are not necessarily zeroed. | ||
| 17 | */ | ||
| 18 | size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size, | ||
| 19 | size_t nallocs, edata_list_active_t *results, | ||
| 20 | bool *deferred_work_generated); | ||
| 21 | bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata, | ||
| 22 | size_t old_size, size_t new_size, bool zero, | ||
| 23 | bool *deferred_work_generated); | ||
| 24 | bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata, | ||
| 25 | size_t old_size, size_t new_size, bool *deferred_work_generated); | ||
| 26 | void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata, | ||
| 27 | bool *deferred_work_generated); | ||
| 28 | /* This function empties out list as a side-effect of being called. */ | ||
| 29 | void (*dalloc_batch)(tsdn_t *tsdn, pai_t *self, | ||
| 30 | edata_list_active_t *list, bool *deferred_work_generated); | ||
| 31 | uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self); | ||
| 32 | }; | ||
| 33 | |||
| 34 | /* | ||
| 35 | * These are just simple convenience functions to avoid having to reference the | ||
| 36 | * same pai_t twice on every invocation. | ||
| 37 | */ | ||
| 38 | |||
| 39 | static inline edata_t * | ||
| 40 | pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, | ||
| 41 | bool zero, bool guarded, bool frequent_reuse, | ||
| 42 | bool *deferred_work_generated) { | ||
| 43 | return self->alloc(tsdn, self, size, alignment, zero, guarded, | ||
| 44 | frequent_reuse, deferred_work_generated); | ||
| 45 | } | ||
| 46 | |||
| 47 | static inline size_t | ||
| 48 | pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs, | ||
| 49 | edata_list_active_t *results, bool *deferred_work_generated) { | ||
| 50 | return self->alloc_batch(tsdn, self, size, nallocs, results, | ||
| 51 | deferred_work_generated); | ||
| 52 | } | ||
| 53 | |||
| 54 | static inline bool | ||
| 55 | pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, | ||
| 56 | size_t new_size, bool zero, bool *deferred_work_generated) { | ||
| 57 | return self->expand(tsdn, self, edata, old_size, new_size, zero, | ||
| 58 | deferred_work_generated); | ||
| 59 | } | ||
| 60 | |||
| 61 | static inline bool | ||
| 62 | pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, | ||
| 63 | size_t new_size, bool *deferred_work_generated) { | ||
| 64 | return self->shrink(tsdn, self, edata, old_size, new_size, | ||
| 65 | deferred_work_generated); | ||
| 66 | } | ||
| 67 | |||
| 68 | static inline void | ||
| 69 | pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, | ||
| 70 | bool *deferred_work_generated) { | ||
| 71 | self->dalloc(tsdn, self, edata, deferred_work_generated); | ||
| 72 | } | ||
| 73 | |||
| 74 | static inline void | ||
| 75 | pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list, | ||
| 76 | bool *deferred_work_generated) { | ||
| 77 | self->dalloc_batch(tsdn, self, list, deferred_work_generated); | ||
| 78 | } | ||
| 79 | |||
| 80 | static inline uint64_t | ||
| 81 | pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) { | ||
| 82 | return self->time_until_deferred_work(tsdn, self); | ||
| 83 | } | ||
| 84 | |||
| 85 | /* | ||
| 86 | * An implementation of batch allocation that simply calls alloc once for | ||
| 87 | * each item in the list. | ||
| 88 | */ | ||
| 89 | size_t pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, | ||
| 90 | size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated); | ||
| 91 | /* Ditto, for dalloc. */ | ||
| 92 | void pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self, | ||
| 93 | edata_list_active_t *list, bool *deferred_work_generated); | ||
| 94 | |||
| 95 | #endif /* JEMALLOC_INTERNAL_PAI_H */ | ||
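The prototype of pai_alloc_batch_default above only states that it calls alloc once per item; a sketch of that loop, written against the documented semantics (page alignment only, return the number of successful allocations when OOM strikes), might look like the following. The list-append helper name edata_list_active_append is an assumption about the edata module, and the real definition lives in the corresponding source file, not in this header.

```c
/* Sketch of the per-item default; not the actual jemalloc definition. */
size_t
pai_alloc_batch_default_sketch(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t nallocs, edata_list_active_t *results,
    bool *deferred_work_generated) {
	for (size_t i = 0; i < nallocs; i++) {
		edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
		    /* zero */ false, /* guarded */ false,
		    /* frequent_reuse */ false, deferred_work_generated);
		if (edata == NULL) {
			return i; /* OOM: report how many extents were added. */
		}
		/* Assumed edata-list helper; appends to the caller's list. */
		edata_list_active_append(results, edata);
	}
	return nallocs;
}
```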
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/peak.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/peak.h deleted file mode 100644 index 59da3e4..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/peak.h +++ /dev/null | |||
| @@ -1,37 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PEAK_H | ||
| 2 | #define JEMALLOC_INTERNAL_PEAK_H | ||
| 3 | |||
| 4 | typedef struct peak_s peak_t; | ||
| 5 | struct peak_s { | ||
| 6 | /* The highest recorded peak value, after adjustment (see below). */ | ||
| 7 | uint64_t cur_max; | ||
| 8 | /* | ||
| 9 | * The difference between alloc and dalloc at the last set_zero call; | ||
| 10 | * this lets us cancel out the appropriate amount of excess. | ||
| 11 | */ | ||
| 12 | uint64_t adjustment; | ||
| 13 | }; | ||
| 14 | |||
| 15 | #define PEAK_INITIALIZER {0, 0} | ||
| 16 | |||
| 17 | static inline uint64_t | ||
| 18 | peak_max(peak_t *peak) { | ||
| 19 | return peak->cur_max; | ||
| 20 | } | ||
| 21 | |||
| 22 | static inline void | ||
| 23 | peak_update(peak_t *peak, uint64_t alloc, uint64_t dalloc) { | ||
| 24 | int64_t candidate_max = (int64_t)(alloc - dalloc - peak->adjustment); | ||
| 25 | if (candidate_max > (int64_t)peak->cur_max) { | ||
| 26 | peak->cur_max = candidate_max; | ||
| 27 | } | ||
| 28 | } | ||
| 29 | |||
| 30 | /* Resets the counter to zero; all peaks are now relative to this point. */ | ||
| 31 | static inline void | ||
| 32 | peak_set_zero(peak_t *peak, uint64_t alloc, uint64_t dalloc) { | ||
| 33 | peak->cur_max = 0; | ||
| 34 | peak->adjustment = alloc - dalloc; | ||
| 35 | } | ||
| 36 | |||
| 37 | #endif /* JEMALLOC_INTERNAL_PEAK_H */ | ||
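A short worked example of the adjustment logic, assuming the inline helpers above are in scope: after set_zero, the net allocation at the time of zeroing is subtracted from every subsequent candidate, so later peaks are measured relative to that point.

```c
/* Illustrative only; values are arbitrary. */
static void
peak_example(void) {
	peak_t peak = PEAK_INITIALIZER;

	peak_update(&peak, 1000, 200); /* net 800 -> cur_max becomes 800 */
	peak_update(&peak, 1200, 700); /* net 500 -> below 800, max unchanged */

	/* Re-zero: adjustment is set to the current net of 500. */
	peak_set_zero(&peak, 1200, 700);
	peak_update(&peak, 1500, 700); /* net since zeroing 300 -> cur_max = 300 */
}
```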
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/peak_event.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/peak_event.h deleted file mode 100644 index b808ce0..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/peak_event.h +++ /dev/null | |||
| @@ -1,24 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PEAK_EVENT_H | ||
| 2 | #define JEMALLOC_INTERNAL_PEAK_EVENT_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * While peak.h contains the simple helper struct that tracks state, this | ||
| 6 | * contains the allocator tie-ins (and knows about tsd, the event module, etc.). | ||
| 7 | */ | ||
| 8 | |||
| 9 | /* Update the peak with current tsd state. */ | ||
| 10 | void peak_event_update(tsd_t *tsd); | ||
| 11 | /* Set current state to zero. */ | ||
| 12 | void peak_event_zero(tsd_t *tsd); | ||
| 13 | uint64_t peak_event_max(tsd_t *tsd); | ||
| 14 | |||
| 15 | /* Manual hooks. */ | ||
| 16 | /* The activity-triggered hooks. */ | ||
| 17 | uint64_t peak_alloc_new_event_wait(tsd_t *tsd); | ||
| 18 | uint64_t peak_alloc_postponed_event_wait(tsd_t *tsd); | ||
| 19 | void peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed); | ||
| 20 | uint64_t peak_dalloc_new_event_wait(tsd_t *tsd); | ||
| 21 | uint64_t peak_dalloc_postponed_event_wait(tsd_t *tsd); | ||
| 22 | void peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed); | ||
| 23 | |||
| 24 | #endif /* JEMALLOC_INTERNAL_PEAK_EVENT_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ph.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ph.h deleted file mode 100644 index 5f091c5..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ph.h +++ /dev/null | |||
| @@ -1,520 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PH_H | ||
| 2 | #define JEMALLOC_INTERNAL_PH_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * A Pairing Heap implementation. | ||
| 6 | * | ||
| 7 | * "The Pairing Heap: A New Form of Self-Adjusting Heap" | ||
| 8 | * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf | ||
| 9 | * | ||
| 10 | * With an auxiliary two-pass list, described in a follow-on paper. | ||
| 11 | * | ||
| 12 | * "Pairing Heaps: Experiments and Analysis" | ||
| 13 | * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf | ||
| 14 | * | ||
| 15 | ******************************************************************************* | ||
| 16 | * | ||
| 17 | * We include a non-obvious optimization: | ||
| 18 | * - First, we introduce a new pop-and-link operation; pop the two most | ||
| 19 | * recently-inserted items off the aux-list, link them, and push the resulting | ||
| 20 | * heap. | ||
| 21 | * - We maintain a count of the number of insertions since the last time we | ||
| 22 | * merged the aux-list (i.e. via first() or remove_first()). After N inserts, | ||
| 23 | * we do ffs(N) pop-and-link operations. | ||
| 24 | * | ||
| 25 | * One way to think of this is that we're progressively building up a tree in | ||
| 26 | * the aux-list, rather than a linked-list (think of the series of merges that | ||
| 27 | * will be performed as the aux-count grows). | ||
| 28 | * | ||
| 29 | * There are a couple of reasons we benefit from this: | ||
| 30 | * - Ordinarily, after N insertions, the aux-list is of size N. With our | ||
| 31 | * strategy, it's of size O(log(N)). So we decrease the worst-case time of | ||
| 32 | * first() calls, and reduce the average cost of remove_min calls. Since | ||
| 33 | * these almost always occur while holding a lock, we practically reduce the | ||
| 34 | * frequency of unusually long hold times. | ||
| 35 | * - This moves the bulk of the work of merging the aux-list onto the threads | ||
| 36 | * that are inserting into the heap. In some common scenarios, insertions | ||
| 37 | * happen in bulk, from a single thread (think tcache flushing; we potentially | ||
| 38 | * move many slabs from slabs_full to slabs_nonfull). All the nodes in this | ||
| 39 | * case are in the inserting threads cache, and linking them is very cheap | ||
| 40 | * (cache misses dominate linking cost). Without this optimization, linking | ||
| 41 | * happens on the next call to remove_first. Since that remove_first call | ||
| 42 | * likely happens on a different thread (or at least, after the cache has | ||
| 43 | * gotten cold if done on the same thread), deferring linking trades cheap | ||
| 44 | * link operations now for expensive ones later. | ||
| 45 | * | ||
| 46 | * The ffs trick keeps amortized insert cost at constant time. Similar | ||
| 47 | * strategies based on periodically sorting the list after a batch of operations | ||
| 48 | * perform worse than this in practice, even with various fancy tricks; they | ||
| 49 | * all raised the amortized complexity of an insert from O(1) to O(log(n)). | ||
| 50 | */ | ||
| 51 | |||
| 52 | typedef int (*ph_cmp_t)(void *, void *); | ||
| 53 | |||
| 54 | /* Node structure. */ | ||
| 55 | typedef struct phn_link_s phn_link_t; | ||
| 56 | struct phn_link_s { | ||
| 57 | void *prev; | ||
| 58 | void *next; | ||
| 59 | void *lchild; | ||
| 60 | }; | ||
| 61 | |||
| 62 | typedef struct ph_s ph_t; | ||
| 63 | struct ph_s { | ||
| 64 | void *root; | ||
| 65 | /* | ||
| 66 | * Inserts done since the last aux-list merge. This is not necessarily | ||
| 67 | * the size of the aux-list, since it's possible that removals have | ||
| 68 | * happened since, and we don't track whether or not those removals are | ||
| 69 | * from the aux list. | ||
| 70 | */ | ||
| 71 | size_t auxcount; | ||
| 72 | }; | ||
| 73 | |||
| 74 | JEMALLOC_ALWAYS_INLINE phn_link_t * | ||
| 75 | phn_link_get(void *phn, size_t offset) { | ||
| 76 | return (phn_link_t *)(((uintptr_t)phn) + offset); | ||
| 77 | } | ||
| 78 | |||
| 79 | JEMALLOC_ALWAYS_INLINE void | ||
| 80 | phn_link_init(void *phn, size_t offset) { | ||
| 81 | phn_link_get(phn, offset)->prev = NULL; | ||
| 82 | phn_link_get(phn, offset)->next = NULL; | ||
| 83 | phn_link_get(phn, offset)->lchild = NULL; | ||
| 84 | } | ||
| 85 | |||
| 86 | /* Internal utility helpers. */ | ||
| 87 | JEMALLOC_ALWAYS_INLINE void * | ||
| 88 | phn_lchild_get(void *phn, size_t offset) { | ||
| 89 | return phn_link_get(phn, offset)->lchild; | ||
| 90 | } | ||
| 91 | |||
| 92 | JEMALLOC_ALWAYS_INLINE void | ||
| 93 | phn_lchild_set(void *phn, void *lchild, size_t offset) { | ||
| 94 | phn_link_get(phn, offset)->lchild = lchild; | ||
| 95 | } | ||
| 96 | |||
| 97 | JEMALLOC_ALWAYS_INLINE void * | ||
| 98 | phn_next_get(void *phn, size_t offset) { | ||
| 99 | return phn_link_get(phn, offset)->next; | ||
| 100 | } | ||
| 101 | |||
| 102 | JEMALLOC_ALWAYS_INLINE void | ||
| 103 | phn_next_set(void *phn, void *next, size_t offset) { | ||
| 104 | phn_link_get(phn, offset)->next = next; | ||
| 105 | } | ||
| 106 | |||
| 107 | JEMALLOC_ALWAYS_INLINE void * | ||
| 108 | phn_prev_get(void *phn, size_t offset) { | ||
| 109 | return phn_link_get(phn, offset)->prev; | ||
| 110 | } | ||
| 111 | |||
| 112 | JEMALLOC_ALWAYS_INLINE void | ||
| 113 | phn_prev_set(void *phn, void *prev, size_t offset) { | ||
| 114 | phn_link_get(phn, offset)->prev = prev; | ||
| 115 | } | ||
| 116 | |||
| 117 | JEMALLOC_ALWAYS_INLINE void | ||
| 118 | phn_merge_ordered(void *phn0, void *phn1, size_t offset, | ||
| 119 | ph_cmp_t cmp) { | ||
| 120 | void *phn0child; | ||
| 121 | |||
| 122 | assert(phn0 != NULL); | ||
| 123 | assert(phn1 != NULL); | ||
| 124 | assert(cmp(phn0, phn1) <= 0); | ||
| 125 | |||
| 126 | phn_prev_set(phn1, phn0, offset); | ||
| 127 | phn0child = phn_lchild_get(phn0, offset); | ||
| 128 | phn_next_set(phn1, phn0child, offset); | ||
| 129 | if (phn0child != NULL) { | ||
| 130 | phn_prev_set(phn0child, phn1, offset); | ||
| 131 | } | ||
| 132 | phn_lchild_set(phn0, phn1, offset); | ||
| 133 | } | ||
| 134 | |||
| 135 | JEMALLOC_ALWAYS_INLINE void * | ||
| 136 | phn_merge(void *phn0, void *phn1, size_t offset, ph_cmp_t cmp) { | ||
| 137 | void *result; | ||
| 138 | if (phn0 == NULL) { | ||
| 139 | result = phn1; | ||
| 140 | } else if (phn1 == NULL) { | ||
| 141 | result = phn0; | ||
| 142 | } else if (cmp(phn0, phn1) < 0) { | ||
| 143 | phn_merge_ordered(phn0, phn1, offset, cmp); | ||
| 144 | result = phn0; | ||
| 145 | } else { | ||
| 146 | phn_merge_ordered(phn1, phn0, offset, cmp); | ||
| 147 | result = phn1; | ||
| 148 | } | ||
| 149 | return result; | ||
| 150 | } | ||
| 151 | |||
| 152 | JEMALLOC_ALWAYS_INLINE void * | ||
| 153 | phn_merge_siblings(void *phn, size_t offset, ph_cmp_t cmp) { | ||
| 154 | void *head = NULL; | ||
| 155 | void *tail = NULL; | ||
| 156 | void *phn0 = phn; | ||
| 157 | void *phn1 = phn_next_get(phn0, offset); | ||
| 158 | |||
| 159 | /* | ||
| 160 | * Multipass merge, wherein the first two elements of a FIFO | ||
| 161 | * are repeatedly merged, and each result is appended to the | ||
| 162 | * singly linked FIFO, until the FIFO contains only a single | ||
| 163 | * element. We start with a sibling list but no reference to | ||
| 164 | * its tail, so we do a single pass over the sibling list to | ||
| 165 | * populate the FIFO. | ||
| 166 | */ | ||
| 167 | if (phn1 != NULL) { | ||
| 168 | void *phnrest = phn_next_get(phn1, offset); | ||
| 169 | if (phnrest != NULL) { | ||
| 170 | phn_prev_set(phnrest, NULL, offset); | ||
| 171 | } | ||
| 172 | phn_prev_set(phn0, NULL, offset); | ||
| 173 | phn_next_set(phn0, NULL, offset); | ||
| 174 | phn_prev_set(phn1, NULL, offset); | ||
| 175 | phn_next_set(phn1, NULL, offset); | ||
| 176 | phn0 = phn_merge(phn0, phn1, offset, cmp); | ||
| 177 | head = tail = phn0; | ||
| 178 | phn0 = phnrest; | ||
| 179 | while (phn0 != NULL) { | ||
| 180 | phn1 = phn_next_get(phn0, offset); | ||
| 181 | if (phn1 != NULL) { | ||
| 182 | phnrest = phn_next_get(phn1, offset); | ||
| 183 | if (phnrest != NULL) { | ||
| 184 | phn_prev_set(phnrest, NULL, offset); | ||
| 185 | } | ||
| 186 | phn_prev_set(phn0, NULL, offset); | ||
| 187 | phn_next_set(phn0, NULL, offset); | ||
| 188 | phn_prev_set(phn1, NULL, offset); | ||
| 189 | phn_next_set(phn1, NULL, offset); | ||
| 190 | phn0 = phn_merge(phn0, phn1, offset, cmp); | ||
| 191 | phn_next_set(tail, phn0, offset); | ||
| 192 | tail = phn0; | ||
| 193 | phn0 = phnrest; | ||
| 194 | } else { | ||
| 195 | phn_next_set(tail, phn0, offset); | ||
| 196 | tail = phn0; | ||
| 197 | phn0 = NULL; | ||
| 198 | } | ||
| 199 | } | ||
| 200 | phn0 = head; | ||
| 201 | phn1 = phn_next_get(phn0, offset); | ||
| 202 | if (phn1 != NULL) { | ||
| 203 | while (true) { | ||
| 204 | head = phn_next_get(phn1, offset); | ||
| 205 | assert(phn_prev_get(phn0, offset) == NULL); | ||
| 206 | phn_next_set(phn0, NULL, offset); | ||
| 207 | assert(phn_prev_get(phn1, offset) == NULL); | ||
| 208 | phn_next_set(phn1, NULL, offset); | ||
| 209 | phn0 = phn_merge(phn0, phn1, offset, cmp); | ||
| 210 | if (head == NULL) { | ||
| 211 | break; | ||
| 212 | } | ||
| 213 | phn_next_set(tail, phn0, offset); | ||
| 214 | tail = phn0; | ||
| 215 | phn0 = head; | ||
| 216 | phn1 = phn_next_get(phn0, offset); | ||
| 217 | } | ||
| 218 | } | ||
| 219 | } | ||
| 220 | return phn0; | ||
| 221 | } | ||
| 222 | |||
| 223 | JEMALLOC_ALWAYS_INLINE void | ||
| 224 | ph_merge_aux(ph_t *ph, size_t offset, ph_cmp_t cmp) { | ||
| 225 | ph->auxcount = 0; | ||
| 226 | void *phn = phn_next_get(ph->root, offset); | ||
| 227 | if (phn != NULL) { | ||
| 228 | phn_prev_set(ph->root, NULL, offset); | ||
| 229 | phn_next_set(ph->root, NULL, offset); | ||
| 230 | phn_prev_set(phn, NULL, offset); | ||
| 231 | phn = phn_merge_siblings(phn, offset, cmp); | ||
| 232 | assert(phn_next_get(phn, offset) == NULL); | ||
| 233 | ph->root = phn_merge(ph->root, phn, offset, cmp); | ||
| 234 | } | ||
| 235 | } | ||
| 236 | |||
| 237 | JEMALLOC_ALWAYS_INLINE void * | ||
| 238 | ph_merge_children(void *phn, size_t offset, ph_cmp_t cmp) { | ||
| 239 | void *result; | ||
| 240 | void *lchild = phn_lchild_get(phn, offset); | ||
| 241 | if (lchild == NULL) { | ||
| 242 | result = NULL; | ||
| 243 | } else { | ||
| 244 | result = phn_merge_siblings(lchild, offset, cmp); | ||
| 245 | } | ||
| 246 | return result; | ||
| 247 | } | ||
| 248 | |||
| 249 | JEMALLOC_ALWAYS_INLINE void | ||
| 250 | ph_new(ph_t *ph) { | ||
| 251 | ph->root = NULL; | ||
| 252 | ph->auxcount = 0; | ||
| 253 | } | ||
| 254 | |||
| 255 | JEMALLOC_ALWAYS_INLINE bool | ||
| 256 | ph_empty(ph_t *ph) { | ||
| 257 | return ph->root == NULL; | ||
| 258 | } | ||
| 259 | |||
| 260 | JEMALLOC_ALWAYS_INLINE void * | ||
| 261 | ph_first(ph_t *ph, size_t offset, ph_cmp_t cmp) { | ||
| 262 | if (ph->root == NULL) { | ||
| 263 | return NULL; | ||
| 264 | } | ||
| 265 | ph_merge_aux(ph, offset, cmp); | ||
| 266 | return ph->root; | ||
| 267 | } | ||
| 268 | |||
| 269 | JEMALLOC_ALWAYS_INLINE void * | ||
| 270 | ph_any(ph_t *ph, size_t offset) { | ||
| 271 | if (ph->root == NULL) { | ||
| 272 | return NULL; | ||
| 273 | } | ||
| 274 | void *aux = phn_next_get(ph->root, offset); | ||
| 275 | if (aux != NULL) { | ||
| 276 | return aux; | ||
| 277 | } | ||
| 278 | return ph->root; | ||
| 279 | } | ||
| 280 | |||
| 281 | /* Returns true if we should stop trying to merge. */ | ||
| 282 | JEMALLOC_ALWAYS_INLINE bool | ||
| 283 | ph_try_aux_merge_pair(ph_t *ph, size_t offset, ph_cmp_t cmp) { | ||
| 284 | assert(ph->root != NULL); | ||
| 285 | void *phn0 = phn_next_get(ph->root, offset); | ||
| 286 | if (phn0 == NULL) { | ||
| 287 | return true; | ||
| 288 | } | ||
| 289 | void *phn1 = phn_next_get(phn0, offset); | ||
| 290 | if (phn1 == NULL) { | ||
| 291 | return true; | ||
| 292 | } | ||
| 293 | void *next_phn1 = phn_next_get(phn1, offset); | ||
| 294 | phn_next_set(phn0, NULL, offset); | ||
| 295 | phn_prev_set(phn0, NULL, offset); | ||
| 296 | phn_next_set(phn1, NULL, offset); | ||
| 297 | phn_prev_set(phn1, NULL, offset); | ||
| 298 | phn0 = phn_merge(phn0, phn1, offset, cmp); | ||
| 299 | phn_next_set(phn0, next_phn1, offset); | ||
| 300 | if (next_phn1 != NULL) { | ||
| 301 | phn_prev_set(next_phn1, phn0, offset); | ||
| 302 | } | ||
| 303 | phn_next_set(ph->root, phn0, offset); | ||
| 304 | phn_prev_set(phn0, ph->root, offset); | ||
| 305 | return next_phn1 == NULL; | ||
| 306 | } | ||
| 307 | |||
| 308 | JEMALLOC_ALWAYS_INLINE void | ||
| 309 | ph_insert(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) { | ||
| 310 | phn_link_init(phn, offset); | ||
| 311 | |||
| 312 | /* | ||
| 313 | * Treat the root as an aux list during insertion, and lazily merge | ||
| 314 | * during a_prefix##remove_first(). For elements that are inserted, | ||
| 315 | * then removed via a_prefix##remove() before the aux list is ever | ||
| 316 | * processed, this makes insert/remove constant-time, whereas eager | ||
| 317 | * merging would make insert O(log n). | ||
| 318 | */ | ||
| 319 | if (ph->root == NULL) { | ||
| 320 | ph->root = phn; | ||
| 321 | } else { | ||
| 322 | /* | ||
| 323 | * As a special case, check to see if we can replace the root. | ||
| 324 | * This is practically common in some important cases, and lets | ||
| 325 | * us defer some insertions (hopefully, until the point where | ||
| 326 | * some of the items in the aux list have been removed, saving | ||
| 327 | * us from linking them at all). | ||
| 328 | */ | ||
| 329 | if (cmp(phn, ph->root) < 0) { | ||
| 330 | phn_lchild_set(phn, ph->root, offset); | ||
| 331 | phn_prev_set(ph->root, phn, offset); | ||
| 332 | ph->root = phn; | ||
| 333 | ph->auxcount = 0; | ||
| 334 | return; | ||
| 335 | } | ||
| 336 | ph->auxcount++; | ||
| 337 | phn_next_set(phn, phn_next_get(ph->root, offset), offset); | ||
| 338 | if (phn_next_get(ph->root, offset) != NULL) { | ||
| 339 | phn_prev_set(phn_next_get(ph->root, offset), phn, | ||
| 340 | offset); | ||
| 341 | } | ||
| 342 | phn_prev_set(phn, ph->root, offset); | ||
| 343 | phn_next_set(ph->root, phn, offset); | ||
| 344 | } | ||
| 345 | if (ph->auxcount > 1) { | ||
| 346 | unsigned nmerges = ffs_zu(ph->auxcount - 1); | ||
| 347 | bool done = false; | ||
| 348 | for (unsigned i = 0; i < nmerges && !done; i++) { | ||
| 349 | done = ph_try_aux_merge_pair(ph, offset, cmp); | ||
| 350 | } | ||
| 351 | } | ||
| 352 | } | ||
| 353 | |||
| 354 | JEMALLOC_ALWAYS_INLINE void * | ||
| 355 | ph_remove_first(ph_t *ph, size_t offset, ph_cmp_t cmp) { | ||
| 356 | void *ret; | ||
| 357 | |||
| 358 | if (ph->root == NULL) { | ||
| 359 | return NULL; | ||
| 360 | } | ||
| 361 | ph_merge_aux(ph, offset, cmp); | ||
| 362 | ret = ph->root; | ||
| 363 | ph->root = ph_merge_children(ph->root, offset, cmp); | ||
| 364 | |||
| 365 | return ret; | ||
| 366 | |||
| 367 | } | ||
| 368 | |||
| 369 | JEMALLOC_ALWAYS_INLINE void | ||
| 370 | ph_remove(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) { | ||
| 371 | void *replace; | ||
| 372 | void *parent; | ||
| 373 | |||
| 374 | if (ph->root == phn) { | ||
| 375 | /* | ||
| 376 | * We can delete from aux list without merging it, but we need | ||
| 377 | * to merge if we are dealing with the root node and it has | ||
| 378 | * children. | ||
| 379 | */ | ||
| 380 | if (phn_lchild_get(phn, offset) == NULL) { | ||
| 381 | ph->root = phn_next_get(phn, offset); | ||
| 382 | if (ph->root != NULL) { | ||
| 383 | phn_prev_set(ph->root, NULL, offset); | ||
| 384 | } | ||
| 385 | return; | ||
| 386 | } | ||
| 387 | ph_merge_aux(ph, offset, cmp); | ||
| 388 | if (ph->root == phn) { | ||
| 389 | ph->root = ph_merge_children(ph->root, offset, cmp); | ||
| 390 | return; | ||
| 391 | } | ||
| 392 | } | ||
| 393 | |||
| 394 | /* Get parent (if phn is leftmost child) before mutating. */ | ||
| 395 | if ((parent = phn_prev_get(phn, offset)) != NULL) { | ||
| 396 | if (phn_lchild_get(parent, offset) != phn) { | ||
| 397 | parent = NULL; | ||
| 398 | } | ||
| 399 | } | ||
| 400 | /* Find a possible replacement node, and link to parent. */ | ||
| 401 | replace = ph_merge_children(phn, offset, cmp); | ||
| 402 | /* Set next/prev for sibling linked list. */ | ||
| 403 | if (replace != NULL) { | ||
| 404 | if (parent != NULL) { | ||
| 405 | phn_prev_set(replace, parent, offset); | ||
| 406 | phn_lchild_set(parent, replace, offset); | ||
| 407 | } else { | ||
| 408 | phn_prev_set(replace, phn_prev_get(phn, offset), | ||
| 409 | offset); | ||
| 410 | if (phn_prev_get(phn, offset) != NULL) { | ||
| 411 | phn_next_set(phn_prev_get(phn, offset), replace, | ||
| 412 | offset); | ||
| 413 | } | ||
| 414 | } | ||
| 415 | phn_next_set(replace, phn_next_get(phn, offset), offset); | ||
| 416 | if (phn_next_get(phn, offset) != NULL) { | ||
| 417 | phn_prev_set(phn_next_get(phn, offset), replace, | ||
| 418 | offset); | ||
| 419 | } | ||
| 420 | } else { | ||
| 421 | if (parent != NULL) { | ||
| 422 | void *next = phn_next_get(phn, offset); | ||
| 423 | phn_lchild_set(parent, next, offset); | ||
| 424 | if (next != NULL) { | ||
| 425 | phn_prev_set(next, parent, offset); | ||
| 426 | } | ||
| 427 | } else { | ||
| 428 | assert(phn_prev_get(phn, offset) != NULL); | ||
| 429 | phn_next_set( | ||
| 430 | phn_prev_get(phn, offset), | ||
| 431 | phn_next_get(phn, offset), offset); | ||
| 432 | } | ||
| 433 | if (phn_next_get(phn, offset) != NULL) { | ||
| 434 | phn_prev_set( | ||
| 435 | phn_next_get(phn, offset), | ||
| 436 | phn_prev_get(phn, offset), offset); | ||
| 437 | } | ||
| 438 | } | ||
| 439 | } | ||
| 440 | |||
| 441 | #define ph_structs(a_prefix, a_type) \ | ||
| 442 | typedef struct { \ | ||
| 443 | phn_link_t link; \ | ||
| 444 | } a_prefix##_link_t; \ | ||
| 445 | \ | ||
| 446 | typedef struct { \ | ||
| 447 | ph_t ph; \ | ||
| 448 | } a_prefix##_t; | ||
| 449 | |||
| 450 | /* | ||
| 451 | * The ph_proto() macro generates function prototypes that correspond to the | ||
| 452 | * functions generated by an equivalently parameterized call to ph_gen(). | ||
| 453 | */ | ||
| 454 | #define ph_proto(a_attr, a_prefix, a_type) \ | ||
| 455 | \ | ||
| 456 | a_attr void a_prefix##_new(a_prefix##_t *ph); \ | ||
| 457 | a_attr bool a_prefix##_empty(a_prefix##_t *ph); \ | ||
| 458 | a_attr a_type *a_prefix##_first(a_prefix##_t *ph); \ | ||
| 459 | a_attr a_type *a_prefix##_any(a_prefix##_t *ph); \ | ||
| 460 | a_attr void a_prefix##_insert(a_prefix##_t *ph, a_type *phn); \ | ||
| 461 | a_attr a_type *a_prefix##_remove_first(a_prefix##_t *ph); \ | ||
| 462 | a_attr void a_prefix##_remove(a_prefix##_t *ph, a_type *phn); \ | ||
| 463 | a_attr a_type *a_prefix##_remove_any(a_prefix##_t *ph); | ||
| 464 | |||
| 465 | /* The ph_gen() macro generates a type-specific pairing heap implementation. */ | ||
| 466 | #define ph_gen(a_attr, a_prefix, a_type, a_field, a_cmp) \ | ||
| 467 | JEMALLOC_ALWAYS_INLINE int \ | ||
| 468 | a_prefix##_ph_cmp(void *a, void *b) { \ | ||
| 469 | return a_cmp((a_type *)a, (a_type *)b); \ | ||
| 470 | } \ | ||
| 471 | \ | ||
| 472 | a_attr void \ | ||
| 473 | a_prefix##_new(a_prefix##_t *ph) { \ | ||
| 474 | ph_new(&ph->ph); \ | ||
| 475 | } \ | ||
| 476 | \ | ||
| 477 | a_attr bool \ | ||
| 478 | a_prefix##_empty(a_prefix##_t *ph) { \ | ||
| 479 | return ph_empty(&ph->ph); \ | ||
| 480 | } \ | ||
| 481 | \ | ||
| 482 | a_attr a_type * \ | ||
| 483 | a_prefix##_first(a_prefix##_t *ph) { \ | ||
| 484 | return ph_first(&ph->ph, offsetof(a_type, a_field), \ | ||
| 485 | &a_prefix##_ph_cmp); \ | ||
| 486 | } \ | ||
| 487 | \ | ||
| 488 | a_attr a_type * \ | ||
| 489 | a_prefix##_any(a_prefix##_t *ph) { \ | ||
| 490 | return ph_any(&ph->ph, offsetof(a_type, a_field)); \ | ||
| 491 | } \ | ||
| 492 | \ | ||
| 493 | a_attr void \ | ||
| 494 | a_prefix##_insert(a_prefix##_t *ph, a_type *phn) { \ | ||
| 495 | ph_insert(&ph->ph, phn, offsetof(a_type, a_field), \ | ||
| 496 | a_prefix##_ph_cmp); \ | ||
| 497 | } \ | ||
| 498 | \ | ||
| 499 | a_attr a_type * \ | ||
| 500 | a_prefix##_remove_first(a_prefix##_t *ph) { \ | ||
| 501 | return ph_remove_first(&ph->ph, offsetof(a_type, a_field), \ | ||
| 502 | a_prefix##_ph_cmp); \ | ||
| 503 | } \ | ||
| 504 | \ | ||
| 505 | a_attr void \ | ||
| 506 | a_prefix##_remove(a_prefix##_t *ph, a_type *phn) { \ | ||
| 507 | ph_remove(&ph->ph, phn, offsetof(a_type, a_field), \ | ||
| 508 | a_prefix##_ph_cmp); \ | ||
| 509 | } \ | ||
| 510 | \ | ||
| 511 | a_attr a_type * \ | ||
| 512 | a_prefix##_remove_any(a_prefix##_t *ph) { \ | ||
| 513 | a_type *ret = a_prefix##_any(ph); \ | ||
| 514 | if (ret != NULL) { \ | ||
| 515 | a_prefix##_remove(ph, ret); \ | ||
| 516 | } \ | ||
| 517 | return ret; \ | ||
| 518 | } | ||
| 519 | |||
| 520 | #endif /* JEMALLOC_INTERNAL_PH_H */ | ||
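To make the macro API concrete, here is a hypothetical instantiation for a node type keyed by an integer value; node_t, node_cmp, and the node_heap prefix are invented for illustration, and the jemalloc-internal pieces (phn_link_t, ph_t, JEMALLOC_ALWAYS_INLINE) are assumed to be in scope from the header above.

```c
#include <stddef.h>
#include <stdint.h>

typedef struct node_s node_t;

/* Generates the node_heap_link_t and node_heap_t wrapper types. */
ph_structs(node_heap, node_t)

struct node_s {
	uint64_t val;
	node_heap_link_t heap_link; /* Embedded pairing-heap linkage. */
};

/* Three-way comparison on the key; ties are allowed. */
static inline int
node_cmp(node_t *a, node_t *b) {
	return (a->val > b->val) - (a->val < b->val);
}

ph_proto(static inline, node_heap, node_t)
ph_gen(static inline, node_heap, node_t, heap_link, node_cmp)

/*
 * Usage:
 *   node_heap_t heap;
 *   node_heap_new(&heap);
 *   node_heap_insert(&heap, n);
 *   node_t *min = node_heap_remove_first(&heap);
 */
```

Splitting the declaration (ph_proto) from the definition (ph_gen) mirrors how the header intends headers and translation units to share a typed heap; in a single-file sketch like this the prototypes are redundant but harmless.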
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/private_namespace.sh b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/private_namespace.sh deleted file mode 100755 index 6ef1346..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/private_namespace.sh +++ /dev/null | |||
| @@ -1,5 +0,0 @@ | |||
| 1 | #!/bin/sh | ||
| 2 | |||
| 3 | for symbol in `cat "$@"` ; do | ||
| 4 | echo "#define ${symbol} JEMALLOC_N(${symbol})" | ||
| 5 | done | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/private_symbols.sh b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/private_symbols.sh deleted file mode 100755 index 442a259..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/private_symbols.sh +++ /dev/null | |||
| @@ -1,51 +0,0 @@ | |||
| 1 | #!/bin/sh | ||
| 2 | # | ||
| 3 | # Generate private_symbols[_jet].awk. | ||
| 4 | # | ||
| 5 | # Usage: private_symbols.sh <sym_prefix> <sym>* | ||
| 6 | # | ||
| 7 | # <sym_prefix> is typically "" or "_". | ||
| 8 | |||
| 9 | sym_prefix=$1 | ||
| 10 | shift | ||
| 11 | |||
| 12 | cat <<EOF | ||
| 13 | #!/usr/bin/env awk -f | ||
| 14 | |||
| 15 | BEGIN { | ||
| 16 | sym_prefix = "${sym_prefix}" | ||
| 17 | split("\\ | ||
| 18 | EOF | ||
| 19 | |||
| 20 | for public_sym in "$@" ; do | ||
| 21 | cat <<EOF | ||
| 22 | ${sym_prefix}${public_sym} \\ | ||
| 23 | EOF | ||
| 24 | done | ||
| 25 | |||
| 26 | cat <<"EOF" | ||
| 27 | ", exported_symbol_names) | ||
| 28 | # Store exported symbol names as keys in exported_symbols. | ||
| 29 | for (i in exported_symbol_names) { | ||
| 30 | exported_symbols[exported_symbol_names[i]] = 1 | ||
| 31 | } | ||
| 32 | } | ||
| 33 | |||
| 34 | # Process 'nm -a <c_source.o>' output. | ||
| 35 | # | ||
| 36 | # Handle lines like: | ||
| 37 | # 0000000000000008 D opt_junk | ||
| 38 | # 0000000000007574 T malloc_initialized | ||
| 39 | (NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) { | ||
| 40 | print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix)) | ||
| 41 | } | ||
| 42 | |||
| 43 | # Process 'dumpbin /SYMBOLS <c_source.obj>' output. | ||
| 44 | # | ||
| 45 | # Handle lines like: | ||
| 46 | # 353 00008098 SECT4 notype External | opt_junk | ||
| 47 | # 3F1 00000000 SECT7 notype () External | malloc_initialized | ||
| 48 | ($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) { | ||
| 49 | print $NF | ||
| 50 | } | ||
| 51 | EOF | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prng.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prng.h deleted file mode 100644 index 14542aa..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prng.h +++ /dev/null | |||
| @@ -1,168 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PRNG_H | ||
| 2 | #define JEMALLOC_INTERNAL_PRNG_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/bit_util.h" | ||
| 5 | |||
| 6 | /* | ||
| 7 | * Simple linear congruential pseudo-random number generator: | ||
| 8 | * | ||
| 9 | * prng(x) = (a*x + c) % m | ||
| 10 | * | ||
| 11 | * where the following constants ensure maximal period: | ||
| 12 | * | ||
| 13 | * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. | ||
| 14 | * c == Odd number (relatively prime to 2^n). | ||
| 15 | * m == 2^32 (or 2^64 for the 64-bit variants below) | ||
| 16 | * | ||
| 17 | * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. | ||
| 18 | * | ||
| 19 | * This choice of m has the disadvantage that the quality of the bits is | ||
| 20 | * proportional to bit position. For example, the lowest bit has a cycle of 2, | ||
| 21 | * the next has a cycle of 4, etc. For this reason, we prefer to use the upper | ||
| 22 | * bits. | ||
| 23 | */ | ||
| 24 | |||
| 25 | /******************************************************************************/ | ||
| 26 | /* INTERNAL DEFINITIONS -- IGNORE */ | ||
| 27 | /******************************************************************************/ | ||
| 28 | #define PRNG_A_32 UINT32_C(1103515241) | ||
| 29 | #define PRNG_C_32 UINT32_C(12347) | ||
| 30 | |||
| 31 | #define PRNG_A_64 UINT64_C(6364136223846793005) | ||
| 32 | #define PRNG_C_64 UINT64_C(1442695040888963407) | ||
| 33 | |||
| 34 | JEMALLOC_ALWAYS_INLINE uint32_t | ||
| 35 | prng_state_next_u32(uint32_t state) { | ||
| 36 | return (state * PRNG_A_32) + PRNG_C_32; | ||
| 37 | } | ||
| 38 | |||
| 39 | JEMALLOC_ALWAYS_INLINE uint64_t | ||
| 40 | prng_state_next_u64(uint64_t state) { | ||
| 41 | return (state * PRNG_A_64) + PRNG_C_64; | ||
| 42 | } | ||
| 43 | |||
| 44 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 45 | prng_state_next_zu(size_t state) { | ||
| 46 | #if LG_SIZEOF_PTR == 2 | ||
| 47 | return (state * PRNG_A_32) + PRNG_C_32; | ||
| 48 | #elif LG_SIZEOF_PTR == 3 | ||
| 49 | return (state * PRNG_A_64) + PRNG_C_64; | ||
| 50 | #else | ||
| 51 | #error Unsupported pointer size | ||
| 52 | #endif | ||
| 53 | } | ||
| 54 | |||
| 55 | /******************************************************************************/ | ||
| 56 | /* BEGIN PUBLIC API */ | ||
| 57 | /******************************************************************************/ | ||
| 58 | |||
| 59 | /* | ||
| 60 | * The prng_lg_range functions give a uniform int in the half-open range [0, | ||
| 61 | * 2**lg_range). | ||
| 62 | */ | ||
| 63 | |||
| 64 | JEMALLOC_ALWAYS_INLINE uint32_t | ||
| 65 | prng_lg_range_u32(uint32_t *state, unsigned lg_range) { | ||
| 66 | assert(lg_range > 0); | ||
| 67 | assert(lg_range <= 32); | ||
| 68 | |||
| 69 | *state = prng_state_next_u32(*state); | ||
| 70 | uint32_t ret = *state >> (32 - lg_range); | ||
| 71 | |||
| 72 | return ret; | ||
| 73 | } | ||
| 74 | |||
| 75 | JEMALLOC_ALWAYS_INLINE uint64_t | ||
| 76 | prng_lg_range_u64(uint64_t *state, unsigned lg_range) { | ||
| 77 | assert(lg_range > 0); | ||
| 78 | assert(lg_range <= 64); | ||
| 79 | |||
| 80 | *state = prng_state_next_u64(*state); | ||
| 81 | uint64_t ret = *state >> (64 - lg_range); | ||
| 82 | |||
| 83 | return ret; | ||
| 84 | } | ||
| 85 | |||
| 86 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 87 | prng_lg_range_zu(size_t *state, unsigned lg_range) { | ||
| 88 | assert(lg_range > 0); | ||
| 89 | assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR)); | ||
| 90 | |||
| 91 | *state = prng_state_next_zu(*state); | ||
| 92 | size_t ret = *state >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range); | ||
| 93 | |||
| 94 | return ret; | ||
| 95 | } | ||
| 96 | |||
| 97 | /* | ||
| 98 | * The prng_range functions behave like the prng_lg_range, but return a result | ||
| 99 | * in [0, range) instead of [0, 2**lg_range). | ||
| 100 | */ | ||
| 101 | |||
| 102 | JEMALLOC_ALWAYS_INLINE uint32_t | ||
| 103 | prng_range_u32(uint32_t *state, uint32_t range) { | ||
| 104 | assert(range != 0); | ||
| 105 | /* | ||
| 106 | * If range were 1, lg_range would be 0, so the shift in | ||
| 107 | * prng_lg_range_u32 would be a shift of a 32-bit variable by 32 bits, | ||
| 108 | * which is UB. Just handle this case as a one-off. | ||
| 109 | */ | ||
| 110 | if (range == 1) { | ||
| 111 | return 0; | ||
| 112 | } | ||
| 113 | |||
| 114 | /* Compute the ceiling of lg(range). */ | ||
| 115 | unsigned lg_range = ffs_u32(pow2_ceil_u32(range)); | ||
| 116 | |||
| 117 | /* Generate a result in [0..range) via repeated trial. */ | ||
| 118 | uint32_t ret; | ||
| 119 | do { | ||
| 120 | ret = prng_lg_range_u32(state, lg_range); | ||
| 121 | } while (ret >= range); | ||
| 122 | |||
| 123 | return ret; | ||
| 124 | } | ||
| 125 | |||
| 126 | JEMALLOC_ALWAYS_INLINE uint64_t | ||
| 127 | prng_range_u64(uint64_t *state, uint64_t range) { | ||
| 128 | assert(range != 0); | ||
| 129 | |||
| 130 | /* See the note in prng_range_u32. */ | ||
| 131 | if (range == 1) { | ||
| 132 | return 0; | ||
| 133 | } | ||
| 134 | |||
| 135 | /* Compute the ceiling of lg(range). */ | ||
| 136 | unsigned lg_range = ffs_u64(pow2_ceil_u64(range)); | ||
| 137 | |||
| 138 | /* Generate a result in [0..range) via repeated trial. */ | ||
| 139 | uint64_t ret; | ||
| 140 | do { | ||
| 141 | ret = prng_lg_range_u64(state, lg_range); | ||
| 142 | } while (ret >= range); | ||
| 143 | |||
| 144 | return ret; | ||
| 145 | } | ||
| 146 | |||
| 147 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 148 | prng_range_zu(size_t *state, size_t range) { | ||
| 149 | assert(range != 0); | ||
| 150 | |||
| 151 | /* See the note in prng_range_u32. */ | ||
| 152 | if (range == 1) { | ||
| 153 | return 0; | ||
| 154 | } | ||
| 155 | |||
| 156 | /* Compute the ceiling of lg(range). */ | ||
| 157 | unsigned lg_range = ffs_u64(pow2_ceil_u64(range)); | ||
| 158 | |||
| 159 | /* Generate a result in [0..range) via repeated trial. */ | ||
| 160 | size_t ret; | ||
| 161 | do { | ||
| 162 | ret = prng_lg_range_zu(state, lg_range); | ||
| 163 | } while (ret >= range); | ||
| 164 | |||
| 165 | return ret; | ||
| 166 | } | ||
| 167 | |||
| 168 | #endif /* JEMALLOC_INTERNAL_PRNG_H */ | ||
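A brief usage sketch of the public API above, assuming the inline functions are in scope: the state word is advanced in place on every call, the lg_range variant takes the top bits of the new state, and the range variant rejection-samples over the next power of two. The seed value and printed output are arbitrary.

```c
#include <stdint.h>
#include <stdio.h>

int
main(void) {
	uint64_t state = 42; /* Seed; advanced in place by each call. */

	/* Uniform in [0, 2^10): the top 10 bits of the updated state. */
	uint64_t a = prng_lg_range_u64(&state, 10);

	/* Uniform in [0, 1000): repeated trial over [0, 1024) until in range. */
	uint64_t b = prng_range_u64(&state, 1000);

	printf("%llu %llu\n", (unsigned long long)a, (unsigned long long)b);
	return 0;
}
```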
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_data.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_data.h deleted file mode 100644 index 4c8e22c..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_data.h +++ /dev/null | |||
| @@ -1,37 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PROF_DATA_H | ||
| 2 | #define JEMALLOC_INTERNAL_PROF_DATA_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/mutex.h" | ||
| 5 | |||
| 6 | extern malloc_mutex_t bt2gctx_mtx; | ||
| 7 | extern malloc_mutex_t tdatas_mtx; | ||
| 8 | extern malloc_mutex_t prof_dump_mtx; | ||
| 9 | |||
| 10 | extern malloc_mutex_t *gctx_locks; | ||
| 11 | extern malloc_mutex_t *tdata_locks; | ||
| 12 | |||
| 13 | extern size_t prof_unbiased_sz[PROF_SC_NSIZES]; | ||
| 14 | extern size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES]; | ||
| 15 | |||
| 16 | void prof_bt_hash(const void *key, size_t r_hash[2]); | ||
| 17 | bool prof_bt_keycomp(const void *k1, const void *k2); | ||
| 18 | |||
| 19 | bool prof_data_init(tsd_t *tsd); | ||
| 20 | prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); | ||
| 21 | char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name); | ||
| 22 | int prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name); | ||
| 23 | void prof_unbias_map_init(); | ||
| 24 | void prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque, | ||
| 25 | prof_tdata_t *tdata, bool leakcheck); | ||
| 26 | prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, | ||
| 27 | uint64_t thr_discrim, char *thread_name, bool active); | ||
| 28 | void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata); | ||
| 29 | void prof_reset(tsd_t *tsd, size_t lg_sample); | ||
| 30 | void prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx); | ||
| 31 | |||
| 32 | /* Used in unit tests. */ | ||
| 33 | size_t prof_tdata_count(void); | ||
| 34 | size_t prof_bt_count(void); | ||
| 35 | void prof_cnt_all(prof_cnt_t *cnt_all); | ||
| 36 | |||
| 37 | #endif /* JEMALLOC_INTERNAL_PROF_DATA_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_externs.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_externs.h deleted file mode 100644 index bdff134..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_externs.h +++ /dev/null | |||
| @@ -1,95 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H | ||
| 2 | #define JEMALLOC_INTERNAL_PROF_EXTERNS_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/mutex.h" | ||
| 5 | #include "jemalloc/internal/prof_hook.h" | ||
| 6 | |||
| 7 | extern bool opt_prof; | ||
| 8 | extern bool opt_prof_active; | ||
| 9 | extern bool opt_prof_thread_active_init; | ||
| 10 | extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ | ||
| 11 | extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ | ||
| 12 | extern bool opt_prof_gdump; /* High-water memory dumping. */ | ||
| 13 | extern bool opt_prof_final; /* Final profile dumping. */ | ||
| 14 | extern bool opt_prof_leak; /* Dump leak summary at exit. */ | ||
| 15 | extern bool opt_prof_leak_error; /* Exit with error code if memory leaked */ | ||
| 16 | extern bool opt_prof_accum; /* Report cumulative bytes. */ | ||
| 17 | extern bool opt_prof_log; /* Turn logging on at boot. */ | ||
| 18 | extern char opt_prof_prefix[ | ||
| 19 | /* Minimize memory bloat for non-prof builds. */ | ||
| 20 | #ifdef JEMALLOC_PROF | ||
| 21 | PATH_MAX + | ||
| 22 | #endif | ||
| 23 | 1]; | ||
| 24 | extern bool opt_prof_unbias; | ||
| 25 | |||
| 26 | /* For recording recent allocations */ | ||
| 27 | extern ssize_t opt_prof_recent_alloc_max; | ||
| 28 | |||
| 29 | /* Whether to use thread name provided by the system or by mallctl. */ | ||
| 30 | extern bool opt_prof_sys_thread_name; | ||
| 31 | |||
| 32 | /* Whether to record per size class counts and request size totals. */ | ||
| 33 | extern bool opt_prof_stats; | ||
| 34 | |||
| 35 | /* Accessed via prof_active_[gs]et{_unlocked,}(). */ | ||
| 36 | extern bool prof_active_state; | ||
| 37 | |||
| 38 | /* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ | ||
| 39 | extern bool prof_gdump_val; | ||
| 40 | |||
| 41 | /* Profile dump interval, measured in bytes allocated. */ | ||
| 42 | extern uint64_t prof_interval; | ||
| 43 | |||
| 44 | /* | ||
| 45 | * Initialized as opt_lg_prof_sample, and potentially modified during profiling | ||
| 46 | * resets. | ||
| 47 | */ | ||
| 48 | extern size_t lg_prof_sample; | ||
| 49 | |||
| 50 | extern bool prof_booted; | ||
| 51 | |||
| 52 | void prof_backtrace_hook_set(prof_backtrace_hook_t hook); | ||
| 53 | prof_backtrace_hook_t prof_backtrace_hook_get(); | ||
| 54 | |||
| 55 | void prof_dump_hook_set(prof_dump_hook_t hook); | ||
| 56 | prof_dump_hook_t prof_dump_hook_get(); | ||
| 57 | |||
| 58 | /* Functions only accessed in prof_inlines.h */ | ||
| 59 | prof_tdata_t *prof_tdata_init(tsd_t *tsd); | ||
| 60 | prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); | ||
| 61 | |||
| 62 | void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx); | ||
| 63 | void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size, | ||
| 64 | size_t usize, prof_tctx_t *tctx); | ||
| 65 | void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info); | ||
| 66 | prof_tctx_t *prof_tctx_create(tsd_t *tsd); | ||
| 67 | void prof_idump(tsdn_t *tsdn); | ||
| 68 | bool prof_mdump(tsd_t *tsd, const char *filename); | ||
| 69 | void prof_gdump(tsdn_t *tsdn); | ||
| 70 | |||
| 71 | void prof_tdata_cleanup(tsd_t *tsd); | ||
| 72 | bool prof_active_get(tsdn_t *tsdn); | ||
| 73 | bool prof_active_set(tsdn_t *tsdn, bool active); | ||
| 74 | const char *prof_thread_name_get(tsd_t *tsd); | ||
| 75 | int prof_thread_name_set(tsd_t *tsd, const char *thread_name); | ||
| 76 | bool prof_thread_active_get(tsd_t *tsd); | ||
| 77 | bool prof_thread_active_set(tsd_t *tsd, bool active); | ||
| 78 | bool prof_thread_active_init_get(tsdn_t *tsdn); | ||
| 79 | bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); | ||
| 80 | bool prof_gdump_get(tsdn_t *tsdn); | ||
| 81 | bool prof_gdump_set(tsdn_t *tsdn, bool active); | ||
| 82 | void prof_boot0(void); | ||
| 83 | void prof_boot1(void); | ||
| 84 | bool prof_boot2(tsd_t *tsd, base_t *base); | ||
| 85 | void prof_prefork0(tsdn_t *tsdn); | ||
| 86 | void prof_prefork1(tsdn_t *tsdn); | ||
| 87 | void prof_postfork_parent(tsdn_t *tsdn); | ||
| 88 | void prof_postfork_child(tsdn_t *tsdn); | ||
| 89 | |||
| 90 | /* Only accessed by thread event. */ | ||
| 91 | uint64_t prof_sample_new_event_wait(tsd_t *tsd); | ||
| 92 | uint64_t prof_sample_postponed_event_wait(tsd_t *tsd); | ||
| 93 | void prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed); | ||
| 94 | |||
| 95 | #endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_hook.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_hook.h deleted file mode 100644 index 150d19d..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_hook.h +++ /dev/null | |||
| @@ -1,21 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PROF_HOOK_H | ||
| 2 | #define JEMALLOC_INTERNAL_PROF_HOOK_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * The hook types declared in this file are experimental and | ||
| 6 | * undocumented; thus the typedefs are located in an 'internal' header. | ||
| 7 | */ | ||
| 8 | |||
| 9 | /* | ||
| 10 | * A hook to mock out backtrace functionality. This can be handy, since it's | ||
| 11 | * otherwise difficult to guarantee that two allocations are reported as coming | ||
| 12 | * from the exact same stack trace in the presence of an optimizing compiler. | ||
| 13 | */ | ||
| 14 | typedef void (*prof_backtrace_hook_t)(void **, unsigned *, unsigned); | ||
| 15 | |||
| 16 | /* | ||
| 17 | * A callback hook that notifies about a recently dumped heap profile. | ||
| 18 | */ | ||
| 19 | typedef void (*prof_dump_hook_t)(const char *filename); | ||
| 20 | |||
| 21 | #endif /* JEMALLOC_INTERNAL_PROF_HOOK_H */ | ||
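A sketch of a mock backtrace hook matching prof_backtrace_hook_t follows. The parameter roles (output frame vector, out-parameter frame count, buffer capacity) are an assumption read off the shape of the typedef, not documented in this header; the fixed fake addresses make any two sampled allocations report identical stacks, which is the testing use case described above.

```c
/* Hypothetical mock; parameter meanings are assumed from the typedef. */
static void
mock_backtrace_hook(void **vec, unsigned *len, unsigned max_len) {
	void *fake_frames[] = {
		(void *)0x1, (void *)0x2, (void *)0x3,
	};
	unsigned n = (unsigned)(sizeof(fake_frames) / sizeof(fake_frames[0]));
	if (n > max_len) {
		n = max_len;
	}
	for (unsigned i = 0; i < n; i++) {
		vec[i] = fake_frames[i]; /* Same "stack" every time. */
	}
	*len = n;
}
```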
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_inlines.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_inlines.h deleted file mode 100644 index a8e7e7f..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_inlines.h +++ /dev/null | |||
| @@ -1,261 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PROF_INLINES_H | ||
| 2 | #define JEMALLOC_INTERNAL_PROF_INLINES_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/safety_check.h" | ||
| 5 | #include "jemalloc/internal/sz.h" | ||
| 6 | #include "jemalloc/internal/thread_event.h" | ||
| 7 | |||
| 8 | JEMALLOC_ALWAYS_INLINE void | ||
| 9 | prof_active_assert() { | ||
| 10 | cassert(config_prof); | ||
| 11 | /* | ||
| 12 | * If opt_prof is off, then prof_active must always be off, regardless | ||
| 13 | * of whether prof_active_mtx is in effect or not. | ||
| 14 | */ | ||
| 15 | assert(opt_prof || !prof_active_state); | ||
| 16 | } | ||
| 17 | |||
| 18 | JEMALLOC_ALWAYS_INLINE bool | ||
| 19 | prof_active_get_unlocked(void) { | ||
| 20 | prof_active_assert(); | ||
| 21 | /* | ||
| 22 | * Even if opt_prof is true, sampling can be temporarily disabled by | ||
| 23 | * setting prof_active to false. No locking is used when reading | ||
| 24 | * prof_active in the fast path, so there are no guarantees regarding | ||
| 25 | * how long it will take for all threads to notice state changes. | ||
| 26 | */ | ||
| 27 | return prof_active_state; | ||
| 28 | } | ||
| 29 | |||
| 30 | JEMALLOC_ALWAYS_INLINE bool | ||
| 31 | prof_gdump_get_unlocked(void) { | ||
| 32 | /* | ||
| 33 | * No locking is used when reading prof_gdump_val in the fast path, so | ||
| 34 | * there are no guarantees regarding how long it will take for all | ||
| 35 | * threads to notice state changes. | ||
| 36 | */ | ||
| 37 | return prof_gdump_val; | ||
| 38 | } | ||
| 39 | |||
| 40 | JEMALLOC_ALWAYS_INLINE prof_tdata_t * | ||
| 41 | prof_tdata_get(tsd_t *tsd, bool create) { | ||
| 42 | prof_tdata_t *tdata; | ||
| 43 | |||
| 44 | cassert(config_prof); | ||
| 45 | |||
| 46 | tdata = tsd_prof_tdata_get(tsd); | ||
| 47 | if (create) { | ||
| 48 | assert(tsd_reentrancy_level_get(tsd) == 0); | ||
| 49 | if (unlikely(tdata == NULL)) { | ||
| 50 | if (tsd_nominal(tsd)) { | ||
| 51 | tdata = prof_tdata_init(tsd); | ||
| 52 | tsd_prof_tdata_set(tsd, tdata); | ||
| 53 | } | ||
| 54 | } else if (unlikely(tdata->expired)) { | ||
| 55 | tdata = prof_tdata_reinit(tsd, tdata); | ||
| 56 | tsd_prof_tdata_set(tsd, tdata); | ||
| 57 | } | ||
| 58 | assert(tdata == NULL || tdata->attached); | ||
| 59 | } | ||
| 60 | |||
| 61 | return tdata; | ||
| 62 | } | ||
| 63 | |||
| 64 | JEMALLOC_ALWAYS_INLINE void | ||
| 65 | prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx, | ||
| 66 | prof_info_t *prof_info) { | ||
| 67 | cassert(config_prof); | ||
| 68 | assert(ptr != NULL); | ||
| 69 | assert(prof_info != NULL); | ||
| 70 | |||
| 71 | arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, false); | ||
| 72 | } | ||
| 73 | |||
| 74 | JEMALLOC_ALWAYS_INLINE void | ||
| 75 | prof_info_get_and_reset_recent(tsd_t *tsd, const void *ptr, | ||
| 76 | emap_alloc_ctx_t *alloc_ctx, prof_info_t *prof_info) { | ||
| 77 | cassert(config_prof); | ||
| 78 | assert(ptr != NULL); | ||
| 79 | assert(prof_info != NULL); | ||
| 80 | |||
| 81 | arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, true); | ||
| 82 | } | ||
| 83 | |||
| 84 | JEMALLOC_ALWAYS_INLINE void | ||
| 85 | prof_tctx_reset(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx) { | ||
| 86 | cassert(config_prof); | ||
| 87 | assert(ptr != NULL); | ||
| 88 | |||
| 89 | arena_prof_tctx_reset(tsd, ptr, alloc_ctx); | ||
| 90 | } | ||
| 91 | |||
| 92 | JEMALLOC_ALWAYS_INLINE void | ||
| 93 | prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) { | ||
| 94 | cassert(config_prof); | ||
| 95 | assert(ptr != NULL); | ||
| 96 | |||
| 97 | arena_prof_tctx_reset_sampled(tsd, ptr); | ||
| 98 | } | ||
| 99 | |||
| 100 | JEMALLOC_ALWAYS_INLINE void | ||
| 101 | prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, size_t size) { | ||
| 102 | cassert(config_prof); | ||
| 103 | assert(edata != NULL); | ||
| 104 | assert((uintptr_t)tctx > (uintptr_t)1U); | ||
| 105 | |||
| 106 | arena_prof_info_set(tsd, edata, tctx, size); | ||
| 107 | } | ||
| 108 | |||
| 109 | JEMALLOC_ALWAYS_INLINE bool | ||
| 110 | prof_sample_should_skip(tsd_t *tsd, bool sample_event) { | ||
| 111 | cassert(config_prof); | ||
| 112 | |||
| 113 | /* Fastpath: no need to load tdata */ | ||
| 114 | if (likely(!sample_event)) { | ||
| 115 | return true; | ||
| 116 | } | ||
| 117 | |||
| 118 | /* | ||
| 119 | * sample_event is always obtained from the thread event module, and | ||
| 120 | * whenever it's true, it means that the thread event module has | ||
| 121 | * already checked the reentrancy level. | ||
| 122 | */ | ||
| 123 | assert(tsd_reentrancy_level_get(tsd) == 0); | ||
| 124 | |||
| 125 | prof_tdata_t *tdata = prof_tdata_get(tsd, true); | ||
| 126 | if (unlikely(tdata == NULL)) { | ||
| 127 | return true; | ||
| 128 | } | ||
| 129 | |||
| 130 | return !tdata->active; | ||
| 131 | } | ||
| 132 | |||
| 133 | JEMALLOC_ALWAYS_INLINE prof_tctx_t * | ||
| 134 | prof_alloc_prep(tsd_t *tsd, bool prof_active, bool sample_event) { | ||
| 135 | prof_tctx_t *ret; | ||
| 136 | |||
| 137 | if (!prof_active || | ||
| 138 | likely(prof_sample_should_skip(tsd, sample_event))) { | ||
| 139 | ret = (prof_tctx_t *)(uintptr_t)1U; | ||
| 140 | } else { | ||
| 141 | ret = prof_tctx_create(tsd); | ||
| 142 | } | ||
| 143 | |||
| 144 | return ret; | ||
| 145 | } | ||
| 146 | |||
| 147 | JEMALLOC_ALWAYS_INLINE void | ||
| 148 | prof_malloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize, | ||
| 149 | emap_alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { | ||
| 150 | cassert(config_prof); | ||
| 151 | assert(ptr != NULL); | ||
| 152 | assert(usize == isalloc(tsd_tsdn(tsd), ptr)); | ||
| 153 | |||
| 154 | if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) { | ||
| 155 | prof_malloc_sample_object(tsd, ptr, size, usize, tctx); | ||
| 156 | } else { | ||
| 157 | prof_tctx_reset(tsd, ptr, alloc_ctx); | ||
| 158 | } | ||
| 159 | } | ||
| 160 | |||
| 161 | JEMALLOC_ALWAYS_INLINE void | ||
| 162 | prof_realloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize, | ||
| 163 | prof_tctx_t *tctx, bool prof_active, const void *old_ptr, size_t old_usize, | ||
| 164 | prof_info_t *old_prof_info, bool sample_event) { | ||
| 165 | bool sampled, old_sampled, moved; | ||
| 166 | |||
| 167 | cassert(config_prof); | ||
| 168 | assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); | ||
| 169 | |||
| 170 | if (prof_active && ptr != NULL) { | ||
| 171 | assert(usize == isalloc(tsd_tsdn(tsd), ptr)); | ||
| 172 | if (prof_sample_should_skip(tsd, sample_event)) { | ||
| 173 | /* | ||
| 174 | * Don't sample. The usize passed to prof_alloc_prep() | ||
| 175 | * was larger than what actually got allocated, so a | ||
| 176 | * backtrace was captured for this allocation, even | ||
| 177 | * though its actual usize was insufficient to cross the | ||
| 178 | * sample threshold. | ||
| 179 | */ | ||
| 180 | prof_alloc_rollback(tsd, tctx); | ||
| 181 | tctx = (prof_tctx_t *)(uintptr_t)1U; | ||
| 182 | } | ||
| 183 | } | ||
| 184 | |||
| 185 | sampled = ((uintptr_t)tctx > (uintptr_t)1U); | ||
| 186 | old_sampled = ((uintptr_t)old_prof_info->alloc_tctx > (uintptr_t)1U); | ||
| 187 | moved = (ptr != old_ptr); | ||
| 188 | |||
| 189 | if (unlikely(sampled)) { | ||
| 190 | prof_malloc_sample_object(tsd, ptr, size, usize, tctx); | ||
| 191 | } else if (moved) { | ||
| 192 | prof_tctx_reset(tsd, ptr, NULL); | ||
| 193 | } else if (unlikely(old_sampled)) { | ||
| 194 | /* | ||
| 195 | * prof_tctx_reset() would work for the !moved case as well, | ||
| 196 | * but prof_tctx_reset_sampled() is slightly cheaper, and is the | ||
| 197 | * proper thing to do here given our explicit knowledge that the | ||
| 198 | * allocation did not move. | ||
| 199 | */ | ||
| 200 | prof_tctx_reset_sampled(tsd, ptr); | ||
| 201 | } else { | ||
| 202 | prof_info_t prof_info; | ||
| 203 | prof_info_get(tsd, ptr, NULL, &prof_info); | ||
| 204 | assert((uintptr_t)prof_info.alloc_tctx == (uintptr_t)1U); | ||
| 205 | } | ||
| 206 | |||
| 207 | /* | ||
| 208 | * The prof_free_sampled_object() call must come after the | ||
| 209 | * prof_malloc_sample_object() call, because tctx and old_tctx may be | ||
| 210 | * the same, in which case reversing the call order could cause the tctx | ||
| 211 | * to be prematurely destroyed as a side effect of momentarily zeroed | ||
| 212 | * counters. | ||
| 213 | */ | ||
| 214 | if (unlikely(old_sampled)) { | ||
| 215 | prof_free_sampled_object(tsd, old_usize, old_prof_info); | ||
| 216 | } | ||
| 217 | } | ||
| 218 | |||
| 219 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 220 | prof_sample_align(size_t orig_align) { | ||
| 221 | /* | ||
| 222 | * Enforce page alignment, so that sampled allocations can be identified | ||
| 223 | * w/o metadata lookup. | ||
| 224 | */ | ||
| 225 | assert(opt_prof); | ||
| 226 | return (opt_cache_oblivious && orig_align < PAGE) ? PAGE : | ||
| 227 | orig_align; | ||
| 228 | } | ||
| 229 | |||
| 230 | JEMALLOC_ALWAYS_INLINE bool | ||
| 231 | prof_sample_aligned(const void *ptr) { | ||
| 232 | return ((uintptr_t)ptr & PAGE_MASK) == 0; | ||
| 233 | } | ||
| 234 | |||
| 235 | JEMALLOC_ALWAYS_INLINE bool | ||
| 236 | prof_sampled(tsd_t *tsd, const void *ptr) { | ||
| 237 | prof_info_t prof_info; | ||
| 238 | prof_info_get(tsd, ptr, NULL, &prof_info); | ||
| 239 | bool sampled = (uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U; | ||
| 240 | if (sampled) { | ||
| 241 | assert(prof_sample_aligned(ptr)); | ||
| 242 | } | ||
| 243 | return sampled; | ||
| 244 | } | ||
| 245 | |||
| 246 | JEMALLOC_ALWAYS_INLINE void | ||
| 247 | prof_free(tsd_t *tsd, const void *ptr, size_t usize, | ||
| 248 | emap_alloc_ctx_t *alloc_ctx) { | ||
| 249 | prof_info_t prof_info; | ||
| 250 | prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info); | ||
| 251 | |||
| 252 | cassert(config_prof); | ||
| 253 | assert(usize == isalloc(tsd_tsdn(tsd), ptr)); | ||
| 254 | |||
| 255 | if (unlikely((uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U)) { | ||
| 256 | assert(prof_sample_aligned(ptr)); | ||
| 257 | prof_free_sampled_object(tsd, usize, &prof_info); | ||
| 258 | } | ||
| 259 | } | ||
| 260 | |||
| 261 | #endif /* JEMALLOC_INTERNAL_PROF_INLINES_H */ | ||
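The inlines above rely on two conventions worth spelling out: a tctx value of `(uintptr_t)1U` is a sentinel meaning "not sampled" (a real `prof_tctx_t *` compares greater than it), and, when cache-oblivious allocation is enabled, sampled allocations are forced to page alignment so they can be recognized without a metadata lookup. A minimal standalone sketch of both checks (not jemalloc code; `PAGE` and the sentinel macro name are illustrative stand-ins):

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins; jemalloc defines PAGE/PAGE_MASK per platform. */
#define PAGE      ((uintptr_t)4096)
#define PAGE_MASK (PAGE - 1)

typedef struct prof_tctx_s prof_tctx_t; /* opaque for this sketch */

/* Hypothetical name for the "not sampled" sentinel used above. */
#define TCTX_NOT_SAMPLED ((prof_tctx_t *)(uintptr_t)1U)

/* Mirrors the (uintptr_t)tctx > (uintptr_t)1U checks in prof_malloc() etc. */
bool
tctx_is_sampled(const prof_tctx_t *tctx) {
	return (uintptr_t)tctx > (uintptr_t)1U;
}

/* Mirrors prof_sample_aligned(): sampled allocations start on a page boundary. */
bool
ptr_is_page_aligned(const void *ptr) {
	return ((uintptr_t)ptr & PAGE_MASK) == 0;
}
```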
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_log.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_log.h deleted file mode 100644 index ccb557d..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_log.h +++ /dev/null | |||
| @@ -1,22 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PROF_LOG_H | ||
| 2 | #define JEMALLOC_INTERNAL_PROF_LOG_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/mutex.h" | ||
| 5 | |||
| 6 | extern malloc_mutex_t log_mtx; | ||
| 7 | |||
| 8 | void prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info); | ||
| 9 | bool prof_log_init(tsd_t *tsdn); | ||
| 10 | |||
| 11 | /* Used in unit tests. */ | ||
| 12 | size_t prof_log_bt_count(void); | ||
| 13 | size_t prof_log_alloc_count(void); | ||
| 14 | size_t prof_log_thr_count(void); | ||
| 15 | bool prof_log_is_logging(void); | ||
| 16 | bool prof_log_rep_check(void); | ||
| 17 | void prof_log_dummy_set(bool new_value); | ||
| 18 | |||
| 19 | bool prof_log_start(tsdn_t *tsdn, const char *filename); | ||
| 20 | bool prof_log_stop(tsdn_t *tsdn); | ||
| 21 | |||
| 22 | #endif /* JEMALLOC_INTERNAL_PROF_LOG_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_recent.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_recent.h deleted file mode 100644 index df41023..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_recent.h +++ /dev/null | |||
| @@ -1,23 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PROF_RECENT_H | ||
| 2 | #define JEMALLOC_INTERNAL_PROF_RECENT_H | ||
| 3 | |||
| 4 | extern malloc_mutex_t prof_recent_alloc_mtx; | ||
| 5 | extern malloc_mutex_t prof_recent_dump_mtx; | ||
| 6 | |||
| 7 | bool prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx); | ||
| 8 | void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize); | ||
| 9 | void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata); | ||
| 10 | bool prof_recent_init(); | ||
| 11 | void edata_prof_recent_alloc_init(edata_t *edata); | ||
| 12 | |||
| 13 | /* Used in unit tests. */ | ||
| 14 | typedef ql_head(prof_recent_t) prof_recent_list_t; | ||
| 15 | extern prof_recent_list_t prof_recent_alloc_list; | ||
| 16 | edata_t *prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *node); | ||
| 17 | prof_recent_t *edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata); | ||
| 18 | |||
| 19 | ssize_t prof_recent_alloc_max_ctl_read(); | ||
| 20 | ssize_t prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max); | ||
| 21 | void prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque); | ||
| 22 | |||
| 23 | #endif /* JEMALLOC_INTERNAL_PROF_RECENT_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_stats.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_stats.h deleted file mode 100644 index 7954e82..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_stats.h +++ /dev/null | |||
| @@ -1,17 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PROF_STATS_H | ||
| 2 | #define JEMALLOC_INTERNAL_PROF_STATS_H | ||
| 3 | |||
| 4 | typedef struct prof_stats_s prof_stats_t; | ||
| 5 | struct prof_stats_s { | ||
| 6 | uint64_t req_sum; | ||
| 7 | uint64_t count; | ||
| 8 | }; | ||
| 9 | |||
| 10 | extern malloc_mutex_t prof_stats_mtx; | ||
| 11 | |||
| 12 | void prof_stats_inc(tsd_t *tsd, szind_t ind, size_t size); | ||
| 13 | void prof_stats_dec(tsd_t *tsd, szind_t ind, size_t size); | ||
| 14 | void prof_stats_get_live(tsd_t *tsd, szind_t ind, prof_stats_t *stats); | ||
| 15 | void prof_stats_get_accum(tsd_t *tsd, szind_t ind, prof_stats_t *stats); | ||
| 16 | |||
| 17 | #endif /* JEMALLOC_INTERNAL_PROF_STATS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_structs.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_structs.h deleted file mode 100644 index dd22115..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_structs.h +++ /dev/null | |||
| @@ -1,221 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H | ||
| 2 | #define JEMALLOC_INTERNAL_PROF_STRUCTS_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/ckh.h" | ||
| 5 | #include "jemalloc/internal/edata.h" | ||
| 6 | #include "jemalloc/internal/mutex.h" | ||
| 7 | #include "jemalloc/internal/prng.h" | ||
| 8 | #include "jemalloc/internal/rb.h" | ||
| 9 | |||
| 10 | struct prof_bt_s { | ||
| 11 | /* Backtrace, stored as len program counters. */ | ||
| 12 | void **vec; | ||
| 13 | unsigned len; | ||
| 14 | }; | ||
| 15 | |||
| 16 | #ifdef JEMALLOC_PROF_LIBGCC | ||
| 17 | /* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ | ||
| 18 | typedef struct { | ||
| 19 | void **vec; | ||
| 20 | unsigned *len; | ||
| 21 | unsigned max; | ||
| 22 | } prof_unwind_data_t; | ||
| 23 | #endif | ||
| 24 | |||
| 25 | struct prof_cnt_s { | ||
| 26 | /* Profiling counters. */ | ||
| 27 | uint64_t curobjs; | ||
| 28 | uint64_t curobjs_shifted_unbiased; | ||
| 29 | uint64_t curbytes; | ||
| 30 | uint64_t curbytes_unbiased; | ||
| 31 | uint64_t accumobjs; | ||
| 32 | uint64_t accumobjs_shifted_unbiased; | ||
| 33 | uint64_t accumbytes; | ||
| 34 | uint64_t accumbytes_unbiased; | ||
| 35 | }; | ||
| 36 | |||
| 37 | typedef enum { | ||
| 38 | prof_tctx_state_initializing, | ||
| 39 | prof_tctx_state_nominal, | ||
| 40 | prof_tctx_state_dumping, | ||
| 41 | prof_tctx_state_purgatory /* Dumper must finish destroying. */ | ||
| 42 | } prof_tctx_state_t; | ||
| 43 | |||
| 44 | struct prof_tctx_s { | ||
| 45 | /* Thread data for thread that performed the allocation. */ | ||
| 46 | prof_tdata_t *tdata; | ||
| 47 | |||
| 48 | /* | ||
| 49 | * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be | ||
| 50 | * defunct during teardown. | ||
| 51 | */ | ||
| 52 | uint64_t thr_uid; | ||
| 53 | uint64_t thr_discrim; | ||
| 54 | |||
| 55 | /* | ||
| 56 | * Reference count of how many times this tctx object is referenced in | ||
| 57 | * recent allocation / deallocation records, protected by tdata->lock. | ||
| 58 | */ | ||
| 59 | uint64_t recent_count; | ||
| 60 | |||
| 61 | /* Profiling counters, protected by tdata->lock. */ | ||
| 62 | prof_cnt_t cnts; | ||
| 63 | |||
| 64 | /* Associated global context. */ | ||
| 65 | prof_gctx_t *gctx; | ||
| 66 | |||
| 67 | /* | ||
| 68 | * UID that distinguishes multiple tctx's created by the same thread, | ||
| 69 | * but coexisting in gctx->tctxs. There are two ways that such | ||
| 70 | * coexistence can occur: | ||
| 71 | * - A dumper thread can cause a tctx to be retained in the purgatory | ||
| 72 | * state. | ||
| 73 | * - Although a single "producer" thread must create all tctx's which | ||
| 74 | * share the same thr_uid, multiple "consumers" can each concurrently | ||
| 75 | * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only | ||
| 76 | * gets called once each time cnts.cur{objs,bytes} drop to 0, but this | ||
| 77 | * threshold can be hit again before the first consumer finishes | ||
| 78 | * executing prof_tctx_destroy(). | ||
| 79 | */ | ||
| 80 | uint64_t tctx_uid; | ||
| 81 | |||
| 82 | /* Linkage into gctx's tctxs. */ | ||
| 83 | rb_node(prof_tctx_t) tctx_link; | ||
| 84 | |||
| 85 | /* | ||
| 86 | * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents | ||
| 87 | * sample vs destroy race. | ||
| 88 | */ | ||
| 89 | bool prepared; | ||
| 90 | |||
| 91 | /* Current dump-related state, protected by gctx->lock. */ | ||
| 92 | prof_tctx_state_t state; | ||
| 93 | |||
| 94 | /* | ||
| 95 | * Copy of cnts snapshotted during early dump phase, protected by | ||
| 96 | * dump_mtx. | ||
| 97 | */ | ||
| 98 | prof_cnt_t dump_cnts; | ||
| 99 | }; | ||
| 100 | typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; | ||
| 101 | |||
| 102 | struct prof_info_s { | ||
| 103 | /* Time when the allocation was made. */ | ||
| 104 | nstime_t alloc_time; | ||
| 105 | /* Points to the prof_tctx_t corresponding to the allocation. */ | ||
| 106 | prof_tctx_t *alloc_tctx; | ||
| 107 | /* Allocation request size. */ | ||
| 108 | size_t alloc_size; | ||
| 109 | }; | ||
| 110 | |||
| 111 | struct prof_gctx_s { | ||
| 112 | /* Protects nlimbo, cnt_summed, and tctxs. */ | ||
| 113 | malloc_mutex_t *lock; | ||
| 114 | |||
| 115 | /* | ||
| 116 | * Number of threads that currently cause this gctx to be in a state of | ||
| 117 | * limbo due to one of: | ||
| 118 | * - Initializing this gctx. | ||
| 119 | * - Initializing per thread counters associated with this gctx. | ||
| 120 | * - Preparing to destroy this gctx. | ||
| 121 | * - Dumping a heap profile that includes this gctx. | ||
| 122 | * nlimbo must be 1 (single destroyer) in order to safely destroy the | ||
| 123 | * gctx. | ||
| 124 | */ | ||
| 125 | unsigned nlimbo; | ||
| 126 | |||
| 127 | /* | ||
| 128 | * Tree of profile counters, one for each thread that has allocated in | ||
| 129 | * this context. | ||
| 130 | */ | ||
| 131 | prof_tctx_tree_t tctxs; | ||
| 132 | |||
| 133 | /* Linkage for tree of contexts to be dumped. */ | ||
| 134 | rb_node(prof_gctx_t) dump_link; | ||
| 135 | |||
| 136 | /* Temporary storage for summation during dump. */ | ||
| 137 | prof_cnt_t cnt_summed; | ||
| 138 | |||
| 139 | /* Associated backtrace. */ | ||
| 140 | prof_bt_t bt; | ||
| 141 | |||
| 142 | /* Backtrace vector, variable size, referred to by bt. */ | ||
| 143 | void *vec[1]; | ||
| 144 | }; | ||
| 145 | typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; | ||
| 146 | |||
| 147 | struct prof_tdata_s { | ||
| 148 | malloc_mutex_t *lock; | ||
| 149 | |||
| 150 | /* Monotonically increasing unique thread identifier. */ | ||
| 151 | uint64_t thr_uid; | ||
| 152 | |||
| 153 | /* | ||
| 154 | * Monotonically increasing discriminator among tdata structures | ||
| 155 | * associated with the same thr_uid. | ||
| 156 | */ | ||
| 157 | uint64_t thr_discrim; | ||
| 158 | |||
| 159 | /* Included in heap profile dumps if non-NULL. */ | ||
| 160 | char *thread_name; | ||
| 161 | |||
| 162 | bool attached; | ||
| 163 | bool expired; | ||
| 164 | |||
| 165 | rb_node(prof_tdata_t) tdata_link; | ||
| 166 | |||
| 167 | /* | ||
| 168 | * Counter used to initialize prof_tctx_t's tctx_uid. No locking is | ||
| 169 | * necessary when incrementing this field, because only one thread ever | ||
| 170 | * does so. | ||
| 171 | */ | ||
| 172 | uint64_t tctx_uid_next; | ||
| 173 | |||
| 174 | /* | ||
| 175 | * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks | ||
| 176 | * backtraces for which it has non-zero allocation/deallocation counters | ||
| 177 | * associated with thread-specific prof_tctx_t objects. Other threads | ||
| 178 | * may write to prof_tctx_t contents when freeing associated objects. | ||
| 179 | */ | ||
| 180 | ckh_t bt2tctx; | ||
| 181 | |||
| 182 | /* State used to avoid dumping while operating on prof internals. */ | ||
| 183 | bool enq; | ||
| 184 | bool enq_idump; | ||
| 185 | bool enq_gdump; | ||
| 186 | |||
| 187 | /* | ||
| 188 | * Set to true during an early dump phase for tdata's which are | ||
| 189 | * currently being dumped. New threads' tdata's have this initialized | ||
| 190 | * to false so that they aren't accidentally included in later dump | ||
| 191 | * phases. | ||
| 192 | */ | ||
| 193 | bool dumping; | ||
| 194 | |||
| 195 | /* | ||
| 196 | * True if profiling is active for this tdata's thread | ||
| 197 | * (thread.prof.active mallctl). | ||
| 198 | */ | ||
| 199 | bool active; | ||
| 200 | |||
| 201 | /* Temporary storage for summation during dump. */ | ||
| 202 | prof_cnt_t cnt_summed; | ||
| 203 | |||
| 204 | /* Backtrace vector, used for calls to prof_backtrace(). */ | ||
| 205 | void *vec[PROF_BT_MAX]; | ||
| 206 | }; | ||
| 207 | typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; | ||
| 208 | |||
| 209 | struct prof_recent_s { | ||
| 210 | nstime_t alloc_time; | ||
| 211 | nstime_t dalloc_time; | ||
| 212 | |||
| 213 | ql_elm(prof_recent_t) link; | ||
| 214 | size_t size; | ||
| 215 | size_t usize; | ||
| 216 | atomic_p_t alloc_edata; /* NULL means allocation has been freed. */ | ||
| 217 | prof_tctx_t *alloc_tctx; | ||
| 218 | prof_tctx_t *dalloc_tctx; | ||
| 219 | }; | ||
| 220 | |||
| 221 | #endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_sys.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_sys.h deleted file mode 100644 index 3d25a42..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_sys.h +++ /dev/null | |||
| @@ -1,30 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PROF_SYS_H | ||
| 2 | #define JEMALLOC_INTERNAL_PROF_SYS_H | ||
| 3 | |||
| 4 | extern malloc_mutex_t prof_dump_filename_mtx; | ||
| 5 | extern base_t *prof_base; | ||
| 6 | |||
| 7 | void bt_init(prof_bt_t *bt, void **vec); | ||
| 8 | void prof_backtrace(tsd_t *tsd, prof_bt_t *bt); | ||
| 9 | void prof_hooks_init(); | ||
| 10 | void prof_unwind_init(); | ||
| 11 | void prof_sys_thread_name_fetch(tsd_t *tsd); | ||
| 12 | int prof_getpid(void); | ||
| 13 | void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind); | ||
| 14 | bool prof_prefix_set(tsdn_t *tsdn, const char *prefix); | ||
| 15 | void prof_fdump_impl(tsd_t *tsd); | ||
| 16 | void prof_idump_impl(tsd_t *tsd); | ||
| 17 | bool prof_mdump_impl(tsd_t *tsd, const char *filename); | ||
| 18 | void prof_gdump_impl(tsd_t *tsd); | ||
| 19 | |||
| 20 | /* Used in unit tests. */ | ||
| 21 | typedef int (prof_sys_thread_name_read_t)(char *buf, size_t limit); | ||
| 22 | extern prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read; | ||
| 23 | typedef int (prof_dump_open_file_t)(const char *, int); | ||
| 24 | extern prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file; | ||
| 25 | typedef ssize_t (prof_dump_write_file_t)(int, const void *, size_t); | ||
| 26 | extern prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file; | ||
| 27 | typedef int (prof_dump_open_maps_t)(); | ||
| 28 | extern prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps; | ||
| 29 | |||
| 30 | #endif /* JEMALLOC_INTERNAL_PROF_SYS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_types.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_types.h deleted file mode 100644 index ba62865..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/prof_types.h +++ /dev/null | |||
| @@ -1,75 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PROF_TYPES_H | ||
| 2 | #define JEMALLOC_INTERNAL_PROF_TYPES_H | ||
| 3 | |||
| 4 | typedef struct prof_bt_s prof_bt_t; | ||
| 5 | typedef struct prof_cnt_s prof_cnt_t; | ||
| 6 | typedef struct prof_tctx_s prof_tctx_t; | ||
| 7 | typedef struct prof_info_s prof_info_t; | ||
| 8 | typedef struct prof_gctx_s prof_gctx_t; | ||
| 9 | typedef struct prof_tdata_s prof_tdata_t; | ||
| 10 | typedef struct prof_recent_s prof_recent_t; | ||
| 11 | |||
| 12 | /* Option defaults. */ | ||
| 13 | #ifdef JEMALLOC_PROF | ||
| 14 | # define PROF_PREFIX_DEFAULT "jeprof" | ||
| 15 | #else | ||
| 16 | # define PROF_PREFIX_DEFAULT "" | ||
| 17 | #endif | ||
| 18 | #define LG_PROF_SAMPLE_DEFAULT 19 | ||
| 19 | #define LG_PROF_INTERVAL_DEFAULT -1 | ||
| 20 | |||
| 21 | /* | ||
| 22 | * Hard limit on stack backtrace depth. The version of prof_backtrace() that | ||
| 23 | * is based on __builtin_return_address() necessarily has a hard-coded number | ||
| 24 | * of backtrace frame handlers, and should be kept in sync with this setting. | ||
| 25 | */ | ||
| 26 | #define PROF_BT_MAX 128 | ||
| 27 | |||
| 28 | /* Initial hash table size. */ | ||
| 29 | #define PROF_CKH_MINITEMS 64 | ||
| 30 | |||
| 31 | /* Size of memory buffer to use when writing dump files. */ | ||
| 32 | #ifndef JEMALLOC_PROF | ||
| 33 | /* Minimize memory bloat for non-prof builds. */ | ||
| 34 | # define PROF_DUMP_BUFSIZE 1 | ||
| 35 | #elif defined(JEMALLOC_DEBUG) | ||
| 36 | /* Use a small buffer size in debug build, mainly to facilitate testing. */ | ||
| 37 | # define PROF_DUMP_BUFSIZE 16 | ||
| 38 | #else | ||
| 39 | # define PROF_DUMP_BUFSIZE 65536 | ||
| 40 | #endif | ||
| 41 | |||
| 42 | /* Size of size class related tables */ | ||
| 43 | #ifdef JEMALLOC_PROF | ||
| 44 | # define PROF_SC_NSIZES SC_NSIZES | ||
| 45 | #else | ||
| 46 | /* Minimize memory bloat for non-prof builds. */ | ||
| 47 | # define PROF_SC_NSIZES 1 | ||
| 48 | #endif | ||
| 49 | |||
| 50 | /* Size of stack-allocated buffer used by prof_printf(). */ | ||
| 51 | #define PROF_PRINTF_BUFSIZE 128 | ||
| 52 | |||
| 53 | /* | ||
| 54 | * Number of mutexes shared among all gctx's. No space is allocated for these | ||
| 55 | * unless profiling is enabled, so it's okay to over-provision. | ||
| 56 | */ | ||
| 57 | #define PROF_NCTX_LOCKS 1024 | ||
| 58 | |||
| 59 | /* | ||
| 60 | * Number of mutexes shared among all tdata's. No space is allocated for these | ||
| 61 | * unless profiling is enabled, so it's okay to over-provision. | ||
| 62 | */ | ||
| 63 | #define PROF_NTDATA_LOCKS 256 | ||
| 64 | |||
| 65 | /* Minimize memory bloat for non-prof builds. */ | ||
| 66 | #ifdef JEMALLOC_PROF | ||
| 67 | #define PROF_DUMP_FILENAME_LEN (PATH_MAX + 1) | ||
| 68 | #else | ||
| 69 | #define PROF_DUMP_FILENAME_LEN 1 | ||
| 70 | #endif | ||
| 71 | |||
| 72 | /* Default number of recent allocations to record. */ | ||
| 73 | #define PROF_RECENT_ALLOC_MAX_DEFAULT 0 | ||
| 74 | |||
| 75 | #endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/psset.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/psset.h deleted file mode 100644 index e1d6497..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/psset.h +++ /dev/null | |||
| @@ -1,131 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_PSSET_H | ||
| 2 | #define JEMALLOC_INTERNAL_PSSET_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/hpdata.h" | ||
| 5 | |||
| 6 | /* | ||
| 7 | * A page-slab set. What the eset is to PAC, the psset is to HPA. It maintains | ||
| 8 | * a collection of page-slabs (the intent being that they are backed by | ||
| 9 | * hugepages, or at least could be), and handles allocation and deallocation | ||
| 10 | * requests. | ||
| 11 | */ | ||
| 12 | |||
| 13 | /* | ||
| 14 | * One more than the maximum pszind_t we will serve out of the HPA. | ||
| 15 | * Practically, we expect only the first few to be actually used. This | ||
| 16 | * corresponds to a maximum size of 512MB on systems with 4k pages and | ||
| 17 | * SC_NGROUP == 4, which is already an unreasonably large maximum. Morally, you | ||
| 18 | * can think of this as being SC_NPSIZES, but there's no sense in wasting that | ||
| 19 | * much space in the arena, making bitmaps that much larger, etc. | ||
| 20 | */ | ||
| 21 | #define PSSET_NPSIZES 64 | ||
| 22 | |||
| 23 | /* | ||
| 24 | * We keep two purge lists per page size class; one for hugified hpdatas (at | ||
| 25 | * index 2*pszind), and one for the non-hugified hpdatas (at index 2*pszind + | ||
| 26 | * 1). This lets us implement a preference for purging non-hugified hpdatas | ||
| 27 | * among similarly-dirty ones. | ||
| 28 | * We reserve the last two indices for empty slabs; for those, we purge | ||
| 29 | * hugified ones (which are definitionally all waste) before non-hugified ones | ||
| 30 | * (i.e. the preference order is reversed). | ||
| 31 | */ | ||
| 32 | #define PSSET_NPURGE_LISTS (2 * PSSET_NPSIZES) | ||
| 33 | |||
| 34 | typedef struct psset_bin_stats_s psset_bin_stats_t; | ||
| 35 | struct psset_bin_stats_s { | ||
| 36 | /* How many pageslabs are in this bin? */ | ||
| 37 | size_t npageslabs; | ||
| 38 | /* Of them, how many pages are active? */ | ||
| 39 | size_t nactive; | ||
| 40 | /* And how many are dirty? */ | ||
| 41 | size_t ndirty; | ||
| 42 | }; | ||
| 43 | |||
| 44 | typedef struct psset_stats_s psset_stats_t; | ||
| 45 | struct psset_stats_s { | ||
| 46 | /* | ||
| 47 | * The second index is huge stats; nonfull_slabs[pszind][0] contains | ||
| 48 | * stats for the non-huge slabs in bucket pszind, while | ||
| 49 | * nonfull_slabs[pszind][1] contains stats for the huge slabs. | ||
| 50 | */ | ||
| 51 | psset_bin_stats_t nonfull_slabs[PSSET_NPSIZES][2]; | ||
| 52 | |||
| 53 | /* | ||
| 54 | * Full slabs don't live in any edata heap, but we still track their | ||
| 55 | * stats. | ||
| 56 | */ | ||
| 57 | psset_bin_stats_t full_slabs[2]; | ||
| 58 | |||
| 59 | /* Empty slabs are similar. */ | ||
| 60 | psset_bin_stats_t empty_slabs[2]; | ||
| 61 | }; | ||
| 62 | |||
| 63 | typedef struct psset_s psset_t; | ||
| 64 | struct psset_s { | ||
| 65 | /* | ||
| 66 | * The pageslabs, quantized by the size class of the largest contiguous | ||
| 67 | * free run of pages in a pageslab. | ||
| 68 | */ | ||
| 69 | hpdata_age_heap_t pageslabs[PSSET_NPSIZES]; | ||
| 70 | /* Bitmap for which set bits correspond to non-empty heaps. */ | ||
| 71 | fb_group_t pageslab_bitmap[FB_NGROUPS(PSSET_NPSIZES)]; | ||
| 72 | /* | ||
| 73 | * The sum of all bin stats in stats. This lets us quickly answer | ||
| 74 | * queries for the number of dirty, active, and retained pages in the | ||
| 75 | * entire set. | ||
| 76 | */ | ||
| 77 | psset_bin_stats_t merged_stats; | ||
| 78 | psset_stats_t stats; | ||
| 79 | /* | ||
| 80 | * Slabs with no active allocations, but which are allowed to serve new | ||
| 81 | * allocations. | ||
| 82 | */ | ||
| 83 | hpdata_empty_list_t empty; | ||
| 84 | /* | ||
| 85 | * Slabs which are available to be purged, ordered by how much we want | ||
| 86 | * to purge them (with later indices indicating slabs we want to purge | ||
| 87 | * more). | ||
| 88 | */ | ||
| 89 | hpdata_purge_list_t to_purge[PSSET_NPURGE_LISTS]; | ||
| 90 | /* Bitmap for which set bits correspond to non-empty purge lists. */ | ||
| 91 | fb_group_t purge_bitmap[FB_NGROUPS(PSSET_NPURGE_LISTS)]; | ||
| 92 | /* Slabs which are available to be hugified. */ | ||
| 93 | hpdata_hugify_list_t to_hugify; | ||
| 94 | }; | ||
| 95 | |||
| 96 | void psset_init(psset_t *psset); | ||
| 97 | void psset_stats_accum(psset_stats_t *dst, psset_stats_t *src); | ||
| 98 | |||
| 99 | /* | ||
| 100 | * Begin or end updating the given pageslab's metadata. While the pageslab is | ||
| 101 | * being updated, it won't be returned from psset_fit calls. | ||
| 102 | */ | ||
| 103 | void psset_update_begin(psset_t *psset, hpdata_t *ps); | ||
| 104 | void psset_update_end(psset_t *psset, hpdata_t *ps); | ||
| 105 | |||
| 106 | /* Analogous to the eset_fit; pick a hpdata to serve the request. */ | ||
| 107 | hpdata_t *psset_pick_alloc(psset_t *psset, size_t size); | ||
| 108 | /* Pick one to purge. */ | ||
| 109 | hpdata_t *psset_pick_purge(psset_t *psset); | ||
| 110 | /* Pick one to hugify. */ | ||
| 111 | hpdata_t *psset_pick_hugify(psset_t *psset); | ||
| 112 | |||
| 113 | void psset_insert(psset_t *psset, hpdata_t *ps); | ||
| 114 | void psset_remove(psset_t *psset, hpdata_t *ps); | ||
| 115 | |||
| 116 | static inline size_t | ||
| 117 | psset_npageslabs(psset_t *psset) { | ||
| 118 | return psset->merged_stats.npageslabs; | ||
| 119 | } | ||
| 120 | |||
| 121 | static inline size_t | ||
| 122 | psset_nactive(psset_t *psset) { | ||
| 123 | return psset->merged_stats.nactive; | ||
| 124 | } | ||
| 125 | |||
| 126 | static inline size_t | ||
| 127 | psset_ndirty(psset_t *psset) { | ||
| 128 | return psset->merged_stats.ndirty; | ||
| 129 | } | ||
| 130 | |||
| 131 | #endif /* JEMALLOC_INTERNAL_PSSET_H */ | ||
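To make the purge-list indexing above concrete, here is a small sketch (not jemalloc code) of how a non-empty slab's purge list would be chosen from its page-size index and hugified state, per the comment on PSSET_NPURGE_LISTS; the helper name and its inputs are illustrative, since the real psset derives both from the hpdata_t:

```c
#include <stdbool.h>
#include <stddef.h>

#define PSSET_NPSIZES 64
#define PSSET_NPURGE_LISTS (2 * PSSET_NPSIZES)

/*
 * For non-empty slabs: hugified slabs of size class pszind go to list
 * 2*pszind and non-hugified ones to 2*pszind + 1; since later indices are
 * purged more eagerly, non-hugified slabs are preferred among similarly
 * dirty ones.  The last two lists are reserved for empty slabs, where the
 * preference is reversed (hugified slabs are purged first).
 */
size_t
purge_list_index(size_t pszind, bool hugified) {
	return 2 * pszind + (hugified ? 0 : 1);
}
```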
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/public_namespace.sh b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/public_namespace.sh deleted file mode 100755 index 4d415ba..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/public_namespace.sh +++ /dev/null | |||
| @@ -1,6 +0,0 @@ | |||
| 1 | #!/bin/sh | ||
| 2 | |||
| 3 | for nm in `cat $1` ; do | ||
| 4 | n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` | ||
| 5 | echo "#define je_${n} JEMALLOC_N(${n})" | ||
| 6 | done | ||
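For reference, each input line's leading name field turns into one #define; an entry whose name is `malloc`, for instance, yields the line below (JEMALLOC_N() itself is defined elsewhere in the build):

```c
#define je_malloc JEMALLOC_N(malloc)
```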
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh deleted file mode 100755 index 4239d17..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh +++ /dev/null | |||
| @@ -1,6 +0,0 @@ | |||
| 1 | #!/bin/sh | ||
| 2 | |||
| 3 | for nm in `cat $1` ; do | ||
| 4 | n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` | ||
| 5 | echo "#undef je_${n}" | ||
| 6 | done | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ql.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ql.h deleted file mode 100644 index c7f52f8..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ql.h +++ /dev/null | |||
| @@ -1,197 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_QL_H | ||
| 2 | #define JEMALLOC_INTERNAL_QL_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/qr.h" | ||
| 5 | |||
| 6 | /* | ||
| 7 | * A linked-list implementation. | ||
| 8 | * | ||
| 9 | * This is built on top of the ring implementation, but that can be viewed as an | ||
| 10 | * implementation detail (i.e. trying to advance past the tail of the list | ||
| 11 | * doesn't wrap around). | ||
| 12 | * | ||
| 13 | * You define a struct like so: | ||
| 14 | * typedef struct my_s my_t; | ||
| 15 | * struct my_s { | ||
| 16 | * int data; | ||
| 17 | * ql_elm(my_t) my_link; | ||
| 18 | * }; | ||
| 19 | * | ||
| 20 | * // We wobble between "list" and "head" for this type; we're now mostly | ||
| 21 | * // heading towards "list". | ||
| 22 | * typedef ql_head(my_t) my_list_t; | ||
| 23 | * | ||
| 24 | * You then pass a my_list_t * for a_head arguments, a my_t * for a_elm | ||
| 25 | * arguments, the token "my_link" for a_field arguments, and the token "my_t" | ||
| 26 | * for a_type arguments. | ||
| 27 | */ | ||
| 28 | |||
| 29 | /* List definitions. */ | ||
| 30 | #define ql_head(a_type) \ | ||
| 31 | struct { \ | ||
| 32 | a_type *qlh_first; \ | ||
| 33 | } | ||
| 34 | |||
| 35 | /* Static initializer for an empty list. */ | ||
| 36 | #define ql_head_initializer(a_head) {NULL} | ||
| 37 | |||
| 38 | /* The field definition. */ | ||
| 39 | #define ql_elm(a_type) qr(a_type) | ||
| 40 | |||
| 41 | /* A pointer to the first element in the list, or NULL if the list is empty. */ | ||
| 42 | #define ql_first(a_head) ((a_head)->qlh_first) | ||
| 43 | |||
| 44 | /* Dynamically initializes a list. */ | ||
| 45 | #define ql_new(a_head) do { \ | ||
| 46 | ql_first(a_head) = NULL; \ | ||
| 47 | } while (0) | ||
| 48 | |||
| 49 | /* | ||
| 50 | * Sets dest to be the contents of src (overwriting any elements there), leaving | ||
| 51 | * src empty. | ||
| 52 | */ | ||
| 53 | #define ql_move(a_head_dest, a_head_src) do { \ | ||
| 54 | ql_first(a_head_dest) = ql_first(a_head_src); \ | ||
| 55 | ql_new(a_head_src); \ | ||
| 56 | } while (0) | ||
| 57 | |||
| 58 | /* True if the list is empty, otherwise false. */ | ||
| 59 | #define ql_empty(a_head) (ql_first(a_head) == NULL) | ||
| 60 | |||
| 61 | /* | ||
| 62 | * Initializes a ql_elm. Must be called even if the field is about to be | ||
| 63 | * overwritten. | ||
| 64 | */ | ||
| 65 | #define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) | ||
| 66 | |||
| 67 | /* | ||
| 68 | * Obtains the last item in the list. | ||
| 69 | */ | ||
| 70 | #define ql_last(a_head, a_field) \ | ||
| 71 | (ql_empty(a_head) ? NULL : qr_prev(ql_first(a_head), a_field)) | ||
| 72 | |||
| 73 | /* | ||
| 74 | * Gets a pointer to the next/prev element in the list. Trying to advance past | ||
| 75 | * the end or retreat before the beginning of the list returns NULL. | ||
| 76 | */ | ||
| 77 | #define ql_next(a_head, a_elm, a_field) \ | ||
| 78 | ((ql_last(a_head, a_field) != (a_elm)) \ | ||
| 79 | ? qr_next((a_elm), a_field) : NULL) | ||
| 80 | #define ql_prev(a_head, a_elm, a_field) \ | ||
| 81 | ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \ | ||
| 82 | : NULL) | ||
| 83 | |||
| 84 | /* Inserts a_elm before a_qlelm in the list. */ | ||
| 85 | #define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ | ||
| 86 | qr_before_insert((a_qlelm), (a_elm), a_field); \ | ||
| 87 | if (ql_first(a_head) == (a_qlelm)) { \ | ||
| 88 | ql_first(a_head) = (a_elm); \ | ||
| 89 | } \ | ||
| 90 | } while (0) | ||
| 91 | |||
| 92 | /* Inserts a_elm after a_qlelm in the list. */ | ||
| 93 | #define ql_after_insert(a_qlelm, a_elm, a_field) \ | ||
| 94 | qr_after_insert((a_qlelm), (a_elm), a_field) | ||
| 95 | |||
| 96 | /* Inserts a_elm as the first item in the list. */ | ||
| 97 | #define ql_head_insert(a_head, a_elm, a_field) do { \ | ||
| 98 | if (!ql_empty(a_head)) { \ | ||
| 99 | qr_before_insert(ql_first(a_head), (a_elm), a_field); \ | ||
| 100 | } \ | ||
| 101 | ql_first(a_head) = (a_elm); \ | ||
| 102 | } while (0) | ||
| 103 | |||
| 104 | /* Inserts a_elm as the last item in the list. */ | ||
| 105 | #define ql_tail_insert(a_head, a_elm, a_field) do { \ | ||
| 106 | if (!ql_empty(a_head)) { \ | ||
| 107 | qr_before_insert(ql_first(a_head), (a_elm), a_field); \ | ||
| 108 | } \ | ||
| 109 | ql_first(a_head) = qr_next((a_elm), a_field); \ | ||
| 110 | } while (0) | ||
| 111 | |||
| 112 | /* | ||
| 113 | * Given lists a = [a_1, ..., a_n] and b = [b_1, ..., b_n], results in: | ||
| 114 | * a = [a_1, ..., a_n, b_1, ..., b_n] and b = []. | ||
| 115 | */ | ||
| 116 | #define ql_concat(a_head_a, a_head_b, a_field) do { \ | ||
| 117 | if (ql_empty(a_head_a)) { \ | ||
| 118 | ql_move(a_head_a, a_head_b); \ | ||
| 119 | } else if (!ql_empty(a_head_b)) { \ | ||
| 120 | qr_meld(ql_first(a_head_a), ql_first(a_head_b), \ | ||
| 121 | a_field); \ | ||
| 122 | ql_new(a_head_b); \ | ||
| 123 | } \ | ||
| 124 | } while (0) | ||
| 125 | |||
| 126 | /* Removes a_elm from the list. */ | ||
| 127 | #define ql_remove(a_head, a_elm, a_field) do { \ | ||
| 128 | if (ql_first(a_head) == (a_elm)) { \ | ||
| 129 | ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ | ||
| 130 | } \ | ||
| 131 | if (ql_first(a_head) != (a_elm)) { \ | ||
| 132 | qr_remove((a_elm), a_field); \ | ||
| 133 | } else { \ | ||
| 134 | ql_new(a_head); \ | ||
| 135 | } \ | ||
| 136 | } while (0) | ||
| 137 | |||
| 138 | /* Removes the first item in the list. */ | ||
| 139 | #define ql_head_remove(a_head, a_type, a_field) do { \ | ||
| 140 | a_type *t = ql_first(a_head); \ | ||
| 141 | ql_remove((a_head), t, a_field); \ | ||
| 142 | } while (0) | ||
| 143 | |||
| 144 | /* Removes the last item in the list. */ | ||
| 145 | #define ql_tail_remove(a_head, a_type, a_field) do { \ | ||
| 146 | a_type *t = ql_last(a_head, a_field); \ | ||
| 147 | ql_remove((a_head), t, a_field); \ | ||
| 148 | } while (0) | ||
| 149 | |||
| 150 | /* | ||
| 151 | * Given a = [a_1, a_2, ..., a_n-1, a_n, a_n+1, ...], | ||
| 152 | * ql_split(a, a_n, b, some_field) results in | ||
| 153 | * a = [a_1, a_2, ..., a_n-1] | ||
| 154 | * and replaces b's contents with: | ||
| 155 | * b = [a_n, a_n+1, ...] | ||
| 156 | */ | ||
| 157 | #define ql_split(a_head_a, a_elm, a_head_b, a_field) do { \ | ||
| 158 | if (ql_first(a_head_a) == (a_elm)) { \ | ||
| 159 | ql_move(a_head_b, a_head_a); \ | ||
| 160 | } else { \ | ||
| 161 | qr_split(ql_first(a_head_a), (a_elm), a_field); \ | ||
| 162 | ql_first(a_head_b) = (a_elm); \ | ||
| 163 | } \ | ||
| 164 | } while (0) | ||
| 165 | |||
| 166 | /* | ||
| 167 | * An optimized version of: | ||
| 168 | * a_type *t = ql_first(a_head); | ||
| 169 | * ql_remove((a_head), t, a_field); | ||
| 170 | * ql_tail_insert((a_head), t, a_field); | ||
| 171 | */ | ||
| 172 | #define ql_rotate(a_head, a_field) do { \ | ||
| 173 | ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ | ||
| 174 | } while (0) | ||
| 175 | |||
| 176 | /* | ||
| 177 | * Helper macro to iterate over each element in a list in order, starting from | ||
| 178 | * the head (or in reverse order, starting from the tail). The usage is | ||
| 179 | * (assuming my_t and my_list_t defined as above). | ||
| 180 | * | ||
| 181 | * int sum(my_list_t *list) { | ||
| 182 | * int sum = 0; | ||
| 183 | * my_t *iter; | ||
| 184 | * ql_foreach(iter, list, link) { | ||
| 185 | * sum += iter->data; | ||
| 186 | * } | ||
| 187 | * return sum; | ||
| 188 | * } | ||
| 189 | */ | ||
| 190 | |||
| 191 | #define ql_foreach(a_var, a_head, a_field) \ | ||
| 192 | qr_foreach((a_var), ql_first(a_head), a_field) | ||
| 193 | |||
| 194 | #define ql_reverse_foreach(a_var, a_head, a_field) \ | ||
| 195 | qr_reverse_foreach((a_var), ql_first(a_head), a_field) | ||
| 196 | |||
| 197 | #endif /* JEMALLOC_INTERNAL_QL_H */ | ||
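A compile-ready sketch of the list API described above, using the my_t / my_list_t naming from the header comment (the include path and a standalone build are assumed; in-tree callers reach these macros through jemalloc's internal headers):

```c
#include <stdio.h>
#include "jemalloc/internal/ql.h"

typedef struct my_s my_t;
struct my_s {
	int data;
	ql_elm(my_t) my_link;
};
typedef ql_head(my_t) my_list_t;

int
main(void) {
	my_list_t list;
	my_t a, b;
	a.data = 1;
	b.data = 2;

	ql_new(&list);
	ql_elm_new(&a, my_link);
	ql_elm_new(&b, my_link);
	ql_tail_insert(&list, &a, my_link);
	ql_tail_insert(&list, &b, my_link);

	my_t *iter;
	ql_foreach(iter, &list, my_link) {
		printf("%d\n", iter->data); /* prints 1, then 2 */
	}

	ql_remove(&list, &a, my_link); /* list now holds only b */
	return 0;
}
```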
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/qr.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/qr.h deleted file mode 100644 index ece4f55..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/qr.h +++ /dev/null | |||
| @@ -1,140 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_QR_H | ||
| 2 | #define JEMALLOC_INTERNAL_QR_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * A ring implementation based on an embedded circular doubly-linked list. | ||
| 6 | * | ||
| 7 | * You define your struct like so: | ||
| 8 | * | ||
| 9 | * typedef struct my_s my_t; | ||
| 10 | * struct my_s { | ||
| 11 | * int data; | ||
| 12 | * qr(my_t) my_link; | ||
| 13 | * }; | ||
| 14 | * | ||
| 15 | * And then pass a my_t * into macros for a_qr arguments, and the token | ||
| 16 | * "my_link" into a_field fields. | ||
| 17 | */ | ||
| 18 | |||
| 19 | /* Ring definitions. */ | ||
| 20 | #define qr(a_type) \ | ||
| 21 | struct { \ | ||
| 22 | a_type *qre_next; \ | ||
| 23 | a_type *qre_prev; \ | ||
| 24 | } | ||
| 25 | |||
| 26 | /* | ||
| 27 | * Initialize a qr link. Every link must be initialized before being used, even | ||
| 28 | * if that initialization is going to be immediately overwritten (say, by being | ||
| 29 | * passed into an insertion macro). | ||
| 30 | */ | ||
| 31 | #define qr_new(a_qr, a_field) do { \ | ||
| 32 | (a_qr)->a_field.qre_next = (a_qr); \ | ||
| 33 | (a_qr)->a_field.qre_prev = (a_qr); \ | ||
| 34 | } while (0) | ||
| 35 | |||
| 36 | /* | ||
| 37 | * Go forwards or backwards in the ring. Note that (the ring being circular), this | ||
| 38 | * always succeeds -- you just keep looping around and around the ring if you | ||
| 39 | * chase pointers without end. | ||
| 40 | */ | ||
| 41 | #define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) | ||
| 42 | #define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) | ||
| 43 | |||
| 44 | /* | ||
| 45 | * Given two rings: | ||
| 46 | * a -> a_1 -> ... -> a_n -- | ||
| 47 | * ^ | | ||
| 48 | * |------------------------ | ||
| 49 | * | ||
| 50 | * b -> b_1 -> ... -> b_n -- | ||
| 51 | * ^ | | ||
| 52 | * |------------------------ | ||
| 53 | * | ||
| 54 | * Results in the ring: | ||
| 55 | * a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n -- | ||
| 56 | * ^ | | ||
| 57 | * |-------------------------------------------------| | ||
| 58 | * | ||
| 59 | * a_qr_a can directly be a qr_next() macro, but a_qr_b cannot. | ||
| 60 | */ | ||
| 61 | #define qr_meld(a_qr_a, a_qr_b, a_field) do { \ | ||
| 62 | (a_qr_b)->a_field.qre_prev->a_field.qre_next = \ | ||
| 63 | (a_qr_a)->a_field.qre_prev; \ | ||
| 64 | (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ | ||
| 65 | (a_qr_b)->a_field.qre_prev = \ | ||
| 66 | (a_qr_b)->a_field.qre_prev->a_field.qre_next; \ | ||
| 67 | (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ | ||
| 68 | (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ | ||
| 69 | } while (0) | ||
| 70 | |||
| 71 | /* | ||
| 72 | * Logically, this is just a meld. The intent, though, is that a_qrelm is a | ||
| 73 | * single-element ring, so that "before" has a more obvious interpretation than | ||
| 74 | * meld. | ||
| 75 | */ | ||
| 76 | #define qr_before_insert(a_qrelm, a_qr, a_field) \ | ||
| 77 | qr_meld((a_qrelm), (a_qr), a_field) | ||
| 78 | |||
| 79 | /* Ditto, but inserting after rather than before. */ | ||
| 80 | #define qr_after_insert(a_qrelm, a_qr, a_field) \ | ||
| 81 | qr_before_insert(qr_next(a_qrelm, a_field), (a_qr), a_field) | ||
| 82 | |||
| 83 | /* | ||
| 84 | * Inverts meld; given the ring: | ||
| 85 | * a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n -- | ||
| 86 | * ^ | | ||
| 87 | * |-------------------------------------------------| | ||
| 88 | * | ||
| 89 | * Results in two rings: | ||
| 90 | * a -> a_1 -> ... -> a_n -- | ||
| 91 | * ^ | | ||
| 92 | * |------------------------ | ||
| 93 | * | ||
| 94 | * b -> b_1 -> ... -> b_n -- | ||
| 95 | * ^ | | ||
| 96 | * |------------------------ | ||
| 97 | * | ||
| 98 | * qr_meld() and qr_split() are functionally equivalent, so there's no need to | ||
| 99 | * have two copies of the code. | ||
| 100 | */ | ||
| 101 | #define qr_split(a_qr_a, a_qr_b, a_field) \ | ||
| 102 | qr_meld((a_qr_a), (a_qr_b), a_field) | ||
| 103 | |||
| 104 | /* | ||
| 105 | * Splits off a_qr from the rest of its ring, so that it becomes a | ||
| 106 | * single-element ring. | ||
| 107 | */ | ||
| 108 | #define qr_remove(a_qr, a_field) \ | ||
| 109 | qr_split(qr_next(a_qr, a_field), (a_qr), a_field) | ||
| 110 | |||
| 111 | /* | ||
| 112 | * Helper macro to iterate over each element in a ring exactly once, starting | ||
| 113 | * with a_qr. The usage is (assuming my_t defined as above): | ||
| 114 | * | ||
| 115 | * int sum(my_t *item) { | ||
| 116 | * int sum = 0; | ||
| 117 | * my_t *iter; | ||
| 118 | * qr_foreach(iter, item, link) { | ||
| 119 | * sum += iter->data; | ||
| 120 | * } | ||
| 121 | * return sum; | ||
| 122 | * } | ||
| 123 | */ | ||
| 124 | #define qr_foreach(var, a_qr, a_field) \ | ||
| 125 | for ((var) = (a_qr); \ | ||
| 126 | (var) != NULL; \ | ||
| 127 | (var) = (((var)->a_field.qre_next != (a_qr)) \ | ||
| 128 | ? (var)->a_field.qre_next : NULL)) | ||
| 129 | |||
| 130 | /* | ||
| 131 | * The same (and with the same usage) as qr_foreach, but in the opposite order, | ||
| 132 | * ending with a_qr. | ||
| 133 | */ | ||
| 134 | #define qr_reverse_foreach(var, a_qr, a_field) \ | ||
| 135 | for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ | ||
| 136 | (var) != NULL; \ | ||
| 137 | (var) = (((var) != (a_qr)) \ | ||
| 138 | ? (var)->a_field.qre_prev : NULL)) | ||
| 139 | |||
| 140 | #endif /* JEMALLOC_INTERNAL_QR_H */ | ||
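A similar sketch for the ring macros, exercising qr_meld() and qr_split() as drawn in the diagrams above (standalone build and include path assumed):

```c
#include <stdio.h>
#include "jemalloc/internal/qr.h"

typedef struct my_s my_t;
struct my_s {
	int data;
	qr(my_t) my_link;
};

int
main(void) {
	my_t a, b, c, d;
	a.data = 1; b.data = 2; c.data = 3; d.data = 4;
	qr_new(&a, my_link); qr_new(&b, my_link);
	qr_new(&c, my_link); qr_new(&d, my_link);

	qr_after_insert(&a, &b, my_link); /* ring 1: a -> b */
	qr_after_insert(&c, &d, my_link); /* ring 2: c -> d */

	qr_meld(&a, &c, my_link);         /* single ring: a -> b -> c -> d */
	my_t *iter;
	qr_foreach(iter, &a, my_link) {
		printf("%d\n", iter->data); /* prints 1, 2, 3, 4 */
	}

	qr_split(&a, &c, my_link);        /* back to the two original rings */
	return 0;
}
```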
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/quantum.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/quantum.h deleted file mode 100644 index c22d753..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/quantum.h +++ /dev/null | |||
| @@ -1,87 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_QUANTUM_H | ||
| 2 | #define JEMALLOC_INTERNAL_QUANTUM_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size | ||
| 6 | * classes). | ||
| 7 | */ | ||
| 8 | #ifndef LG_QUANTUM | ||
| 9 | # if (defined(__i386__) || defined(_M_IX86)) | ||
| 10 | # define LG_QUANTUM 4 | ||
| 11 | # endif | ||
| 12 | # ifdef __ia64__ | ||
| 13 | # define LG_QUANTUM 4 | ||
| 14 | # endif | ||
| 15 | # ifdef __alpha__ | ||
| 16 | # define LG_QUANTUM 4 | ||
| 17 | # endif | ||
| 18 | # if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) | ||
| 19 | # define LG_QUANTUM 4 | ||
| 20 | # endif | ||
| 21 | # if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) | ||
| 22 | # define LG_QUANTUM 4 | ||
| 23 | # endif | ||
| 24 | # ifdef __arm__ | ||
| 25 | # define LG_QUANTUM 3 | ||
| 26 | # endif | ||
| 27 | # ifdef __aarch64__ | ||
| 28 | # define LG_QUANTUM 4 | ||
| 29 | # endif | ||
| 30 | # ifdef __hppa__ | ||
| 31 | # define LG_QUANTUM 4 | ||
| 32 | # endif | ||
| 33 | # ifdef __loongarch__ | ||
| 34 | # define LG_QUANTUM 4 | ||
| 35 | # endif | ||
| 36 | # ifdef __m68k__ | ||
| 37 | # define LG_QUANTUM 3 | ||
| 38 | # endif | ||
| 39 | # ifdef __mips__ | ||
| 40 | # if defined(__mips_n32) || defined(__mips_n64) | ||
| 41 | # define LG_QUANTUM 4 | ||
| 42 | # else | ||
| 43 | # define LG_QUANTUM 3 | ||
| 44 | # endif | ||
| 45 | # endif | ||
| 46 | # ifdef __nios2__ | ||
| 47 | # define LG_QUANTUM 3 | ||
| 48 | # endif | ||
| 49 | # ifdef __or1k__ | ||
| 50 | # define LG_QUANTUM 3 | ||
| 51 | # endif | ||
| 52 | # ifdef __powerpc__ | ||
| 53 | # define LG_QUANTUM 4 | ||
| 54 | # endif | ||
| 55 | # if defined(__riscv) || defined(__riscv__) | ||
| 56 | # define LG_QUANTUM 4 | ||
| 57 | # endif | ||
| 58 | # ifdef __s390__ | ||
| 59 | # define LG_QUANTUM 4 | ||
| 60 | # endif | ||
| 61 | # if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \ | ||
| 62 | defined(__SH4_SINGLE_ONLY__)) | ||
| 63 | # define LG_QUANTUM 4 | ||
| 64 | # endif | ||
| 65 | # ifdef __tile__ | ||
| 66 | # define LG_QUANTUM 4 | ||
| 67 | # endif | ||
| 68 | # ifdef __le32__ | ||
| 69 | # define LG_QUANTUM 4 | ||
| 70 | # endif | ||
| 71 | # ifdef __arc__ | ||
| 72 | # define LG_QUANTUM 3 | ||
| 73 | # endif | ||
| 74 | # ifndef LG_QUANTUM | ||
| 75 | # error "Unknown minimum alignment for architecture; specify via " | ||
| 76 | "--with-lg-quantum" | ||
| 77 | # endif | ||
| 78 | #endif | ||
| 79 | |||
| 80 | #define QUANTUM ((size_t)(1U << LG_QUANTUM)) | ||
| 81 | #define QUANTUM_MASK (QUANTUM - 1) | ||
| 82 | |||
| 83 | /* Return the smallest quantum multiple that is >= a. */ | ||
| 84 | #define QUANTUM_CEILING(a) \ | ||
| 85 | (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) | ||
| 86 | |||
| 87 | #endif /* JEMALLOC_INTERNAL_QUANTUM_H */ | ||
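A worked example of the rounding macro, assuming LG_QUANTUM == 4 (the value selected above for x86-64 and most other 64-bit targets), so QUANTUM == 16:

```c
#include <assert.h>
#include <stddef.h>

#define LG_QUANTUM 4
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)
#define QUANTUM_CEILING(a) (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

int
main(void) {
	assert(QUANTUM_CEILING(1) == 16);  /* (1 + 15) & ~15 */
	assert(QUANTUM_CEILING(16) == 16); /* already a quantum multiple */
	assert(QUANTUM_CEILING(17) == 32); /* rounds up to the next multiple */
	return 0;
}
```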
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/rb.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/rb.h deleted file mode 100644 index a9a51cb..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/rb.h +++ /dev/null | |||
| @@ -1,1856 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_RB_H | ||
| 2 | #define JEMALLOC_INTERNAL_RB_H | ||
| 3 | |||
| 4 | /*- | ||
| 5 | ******************************************************************************* | ||
| 6 | * | ||
| 7 | * cpp macro implementation of left-leaning 2-3 red-black trees. Parent | ||
| 8 | * pointers are not used, and color bits are stored in the least significant | ||
| 9 | * bit of right-child pointers (if RB_COMPACT is defined), thus making node | ||
| 10 | * linkage as compact as is possible for red-black trees. | ||
| 11 | * | ||
| 12 | * Usage: | ||
| 13 | * | ||
| 14 | * #include <stdint.h> | ||
| 15 | * #include <stdbool.h> | ||
| 16 | * #define NDEBUG // (Optional, see assert(3).) | ||
| 17 | * #include <assert.h> | ||
| 18 | * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) | ||
| 19 | * #include <rb.h> | ||
| 20 | * ... | ||
| 21 | * | ||
| 22 | ******************************************************************************* | ||
| 23 | */ | ||
| 24 | |||
| 25 | #ifndef __PGI | ||
| 26 | #define RB_COMPACT | ||
| 27 | #endif | ||
| 28 | |||
| 29 | /* | ||
| 30 | * Each node in the RB tree consumes at least 1 byte of space (for the linkage | ||
| 31 | * if nothing else), so there are at most sizeof(void *) << 3 rb tree nodes | ||
| 32 | * in any process (and thus, at most sizeof(void *) << 3 nodes in any rb tree). | ||
| 33 | * The choice of algorithm bounds the depth of a tree to twice the binary log of | ||
| 34 | * the number of elements in the tree; the following bound follows. | ||
| 35 | */ | ||
| 36 | #define RB_MAX_DEPTH (sizeof(void *) << 4) | ||
| 37 | |||
| 38 | #ifdef RB_COMPACT | ||
| 39 | /* Node structure. */ | ||
| 40 | #define rb_node(a_type) \ | ||
| 41 | struct { \ | ||
| 42 | a_type *rbn_left; \ | ||
| 43 | a_type *rbn_right_red; \ | ||
| 44 | } | ||
| 45 | #else | ||
| 46 | #define rb_node(a_type) \ | ||
| 47 | struct { \ | ||
| 48 | a_type *rbn_left; \ | ||
| 49 | a_type *rbn_right; \ | ||
| 50 | bool rbn_red; \ | ||
| 51 | } | ||
| 52 | #endif | ||
| 53 | |||
| 54 | /* Root structure. */ | ||
| 55 | #define rb_tree(a_type) \ | ||
| 56 | struct { \ | ||
| 57 | a_type *rbt_root; \ | ||
| 58 | } | ||
| 59 | |||
| 60 | /* Left accessors. */ | ||
| 61 | #define rbtn_left_get(a_type, a_field, a_node) \ | ||
| 62 | ((a_node)->a_field.rbn_left) | ||
| 63 | #define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ | ||
| 64 | (a_node)->a_field.rbn_left = a_left; \ | ||
| 65 | } while (0) | ||
| 66 | |||
| 67 | #ifdef RB_COMPACT | ||
| 68 | /* Right accessors. */ | ||
| 69 | #define rbtn_right_get(a_type, a_field, a_node) \ | ||
| 70 | ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ | ||
| 71 | & ((ssize_t)-2))) | ||
| 72 | #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ | ||
| 73 | (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ | ||
| 74 | | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ | ||
| 75 | } while (0) | ||
| 76 | |||
| 77 | /* Color accessors. */ | ||
| 78 | #define rbtn_red_get(a_type, a_field, a_node) \ | ||
| 79 | ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ | ||
| 80 | & ((size_t)1))) | ||
| 81 | #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ | ||
| 82 | (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ | ||
| 83 | (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ | ||
| 84 | | ((ssize_t)a_red)); \ | ||
| 85 | } while (0) | ||
| 86 | #define rbtn_red_set(a_type, a_field, a_node) do { \ | ||
| 87 | (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ | ||
| 88 | (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ | ||
| 89 | } while (0) | ||
| 90 | #define rbtn_black_set(a_type, a_field, a_node) do { \ | ||
| 91 | (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ | ||
| 92 | (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ | ||
| 93 | } while (0) | ||
| 94 | |||
| 95 | /* Node initializer. */ | ||
| 96 | #define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ | ||
| 97 | /* Bookkeeping bit cannot be used by node pointer. */ \ | ||
| 98 | assert(((uintptr_t)(a_node) & 0x1) == 0); \ | ||
| 99 | rbtn_left_set(a_type, a_field, (a_node), NULL); \ | ||
| 100 | rbtn_right_set(a_type, a_field, (a_node), NULL); \ | ||
| 101 | rbtn_red_set(a_type, a_field, (a_node)); \ | ||
| 102 | } while (0) | ||
| 103 | #else | ||
| 104 | /* Right accessors. */ | ||
| 105 | #define rbtn_right_get(a_type, a_field, a_node) \ | ||
| 106 | ((a_node)->a_field.rbn_right) | ||
| 107 | #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ | ||
| 108 | (a_node)->a_field.rbn_right = a_right; \ | ||
| 109 | } while (0) | ||
| 110 | |||
| 111 | /* Color accessors. */ | ||
| 112 | #define rbtn_red_get(a_type, a_field, a_node) \ | ||
| 113 | ((a_node)->a_field.rbn_red) | ||
| 114 | #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ | ||
| 115 | (a_node)->a_field.rbn_red = (a_red); \ | ||
| 116 | } while (0) | ||
| 117 | #define rbtn_red_set(a_type, a_field, a_node) do { \ | ||
| 118 | (a_node)->a_field.rbn_red = true; \ | ||
| 119 | } while (0) | ||
| 120 | #define rbtn_black_set(a_type, a_field, a_node) do { \ | ||
| 121 | (a_node)->a_field.rbn_red = false; \ | ||
| 122 | } while (0) | ||
| 123 | |||
| 124 | /* Node initializer. */ | ||
| 125 | #define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ | ||
| 126 | rbtn_left_set(a_type, a_field, (a_node), NULL); \ | ||
| 127 | rbtn_right_set(a_type, a_field, (a_node), NULL); \ | ||
| 128 | rbtn_red_set(a_type, a_field, (a_node)); \ | ||
| 129 | } while (0) | ||
| 130 | #endif | ||
| 131 | |||
| 132 | /* Tree initializer. */ | ||
| 133 | #define rb_new(a_type, a_field, a_rbt) do { \ | ||
| 134 | (a_rbt)->rbt_root = NULL; \ | ||
| 135 | } while (0) | ||
| 136 | |||
| 137 | /* Internal utility macros. */ | ||
| 138 | #define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ | ||
| 139 | (r_node) = (a_root); \ | ||
| 140 | if ((r_node) != NULL) { \ | ||
| 141 | for (; \ | ||
| 142 | rbtn_left_get(a_type, a_field, (r_node)) != NULL; \ | ||
| 143 | (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ | ||
| 144 | } \ | ||
| 145 | } \ | ||
| 146 | } while (0) | ||
| 147 | |||
| 148 | #define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ | ||
| 149 | (r_node) = (a_root); \ | ||
| 150 | if ((r_node) != NULL) { \ | ||
| 151 | for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \ | ||
| 152 | (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \ | ||
| 153 | } \ | ||
| 154 | } \ | ||
| 155 | } while (0) | ||
| 156 | |||
| 157 | #define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ | ||
| 158 | (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ | ||
| 159 | rbtn_right_set(a_type, a_field, (a_node), \ | ||
| 160 | rbtn_left_get(a_type, a_field, (r_node))); \ | ||
| 161 | rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ | ||
| 162 | } while (0) | ||
| 163 | |||
| 164 | #define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ | ||
| 165 | (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ | ||
| 166 | rbtn_left_set(a_type, a_field, (a_node), \ | ||
| 167 | rbtn_right_get(a_type, a_field, (r_node))); \ | ||
| 168 | rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ | ||
| 169 | } while (0) | ||
| 170 | |||
| 171 | #define rb_summarized_only_false(...) | ||
| 172 | #define rb_summarized_only_true(...) __VA_ARGS__ | ||
| 173 | #define rb_empty_summarize(a_node, a_lchild, a_rchild) false | ||
| 174 | |||
| 175 | /* | ||
| 176 | * The rb_proto() and rb_summarized_proto() macros generate function prototypes | ||
| 177 | * that correspond to the functions generated by an equivalently parameterized | ||
| 178 | * call to rb_gen() or rb_summarized_gen(), respectively. | ||
| 179 | */ | ||
| 180 | |||
| 181 | #define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ | ||
| 182 | rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, false) | ||
| 183 | #define rb_summarized_proto(a_attr, a_prefix, a_rbt_type, a_type) \ | ||
| 184 | rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, true) | ||
| 185 | #define rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, \ | ||
| 186 | a_is_summarized) \ | ||
| 187 | a_attr void \ | ||
| 188 | a_prefix##new(a_rbt_type *rbtree); \ | ||
| 189 | a_attr bool \ | ||
| 190 | a_prefix##empty(a_rbt_type *rbtree); \ | ||
| 191 | a_attr a_type * \ | ||
| 192 | a_prefix##first(a_rbt_type *rbtree); \ | ||
| 193 | a_attr a_type * \ | ||
| 194 | a_prefix##last(a_rbt_type *rbtree); \ | ||
| 195 | a_attr a_type * \ | ||
| 196 | a_prefix##next(a_rbt_type *rbtree, a_type *node); \ | ||
| 197 | a_attr a_type * \ | ||
| 198 | a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ | ||
| 199 | a_attr a_type * \ | ||
| 200 | a_prefix##search(a_rbt_type *rbtree, const a_type *key); \ | ||
| 201 | a_attr a_type * \ | ||
| 202 | a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \ | ||
| 203 | a_attr a_type * \ | ||
| 204 | a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \ | ||
| 205 | a_attr void \ | ||
| 206 | a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ | ||
| 207 | a_attr void \ | ||
| 208 | a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ | ||
| 209 | a_attr a_type * \ | ||
| 210 | a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ | ||
| 211 | a_rbt_type *, a_type *, void *), void *arg); \ | ||
| 212 | a_attr a_type * \ | ||
| 213 | a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ | ||
| 214 | a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ | ||
| 215 | a_attr void \ | ||
| 216 | a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ | ||
| 217 | void *arg); \ | ||
| 218 | /* Extended API */ \ | ||
| 219 | rb_summarized_only_##a_is_summarized( \ | ||
| 220 | a_attr void \ | ||
| 221 | a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node); \ | ||
| 222 | a_attr bool \ | ||
| 223 | a_prefix##empty_filtered(a_rbt_type *rbtree, \ | ||
| 224 | bool (*filter_node)(void *, a_type *), \ | ||
| 225 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 226 | void *filter_ctx); \ | ||
| 227 | a_attr a_type * \ | ||
| 228 | a_prefix##first_filtered(a_rbt_type *rbtree, \ | ||
| 229 | bool (*filter_node)(void *, a_type *), \ | ||
| 230 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 231 | void *filter_ctx); \ | ||
| 232 | a_attr a_type * \ | ||
| 233 | a_prefix##last_filtered(a_rbt_type *rbtree, \ | ||
| 234 | bool (*filter_node)(void *, a_type *), \ | ||
| 235 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 236 | void *filter_ctx); \ | ||
| 237 | a_attr a_type * \ | ||
| 238 | a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node, \ | ||
| 239 | bool (*filter_node)(void *, a_type *), \ | ||
| 240 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 241 | void *filter_ctx); \ | ||
| 242 | a_attr a_type * \ | ||
| 243 | a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node, \ | ||
| 244 | bool (*filter_node)(void *, a_type *), \ | ||
| 245 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 246 | void *filter_ctx); \ | ||
| 247 | a_attr a_type * \ | ||
| 248 | a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key, \ | ||
| 249 | bool (*filter_node)(void *, a_type *), \ | ||
| 250 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 251 | void *filter_ctx); \ | ||
| 252 | a_attr a_type * \ | ||
| 253 | a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key, \ | ||
| 254 | bool (*filter_node)(void *, a_type *), \ | ||
| 255 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 256 | void *filter_ctx); \ | ||
| 257 | a_attr a_type * \ | ||
| 258 | a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key, \ | ||
| 259 | bool (*filter_node)(void *, a_type *), \ | ||
| 260 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 261 | void *filter_ctx); \ | ||
| 262 | a_attr a_type * \ | ||
| 263 | a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start, \ | ||
| 264 | a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \ | ||
| 265 | bool (*filter_node)(void *, a_type *), \ | ||
| 266 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 267 | void *filter_ctx); \ | ||
| 268 | a_attr a_type * \ | ||
| 269 | a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \ | ||
| 270 | a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \ | ||
| 271 | bool (*filter_node)(void *, a_type *), \ | ||
| 272 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 273 | void *filter_ctx); \ | ||
| 274 | ) | ||
| 275 | |||
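As an illustration (not part of rb.h) of how the prototype macros pair with the generators: a header can declare the ex_ API with rb_proto, while a single .c file expands the matching rb_gen with the same prefix, types, and linkage field.

/* ex.h -- illustrative sketch; mirrors the ex_ example documented below. */
typedef struct ex_node_s ex_node_t;
struct ex_node_s {
    rb_node(ex_node_t) ex_link;
};
typedef rb_tree(ex_node_t) ex_t;
rb_proto(, ex_, ex_t, ex_node_t)

/* ex.c would then provide the definitions with a matching call, e.g.: */
/* rb_gen(, ex_, ex_t, ex_node_t, ex_link, ex_cmp) */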
| 276 | /* | ||
| 277 | * The rb_gen() macro generates a type-specific red-black tree implementation, | ||
| 278 | * based on the above cpp macros. | ||
| 279 | * Arguments: | ||
| 280 | * | ||
| 281 | * a_attr: | ||
| 282 | * Function attribute for generated functions (ex: static). | ||
| 283 | * a_prefix: | ||
| 284 | * Prefix for generated functions (ex: ex_). | ||
| 285 | * a_rbt_type: | ||
| 286 | * Type for red-black tree data structure (ex: ex_t). | ||
| 287 | * a_type: | ||
| 288 | * Type for red-black tree node data structure (ex: ex_node_t). | ||
| 289 | * a_field: | ||
| 290 | * Name of red-black tree node linkage (ex: ex_link). | ||
| 291 | * a_cmp: | ||
| 292 | * Node comparison function name, with the following prototype: | ||
| 293 | * | ||
| 294 | * int a_cmp(a_type *a_node, a_type *a_other); | ||
| 295 | * ^^^^^^ | ||
| 296 | * or a_key | ||
| 297 | * Interpretation of comparison function return values: | ||
| 298 | * -1 : a_node < a_other | ||
| 299 | * 0 : a_node == a_other | ||
| 300 | * 1 : a_node > a_other | ||
| 301 | * In all cases, the a_node or a_key macro argument is the first argument to | ||
| 302 | * the comparison function, which makes it possible to write comparison | ||
| 303 | * functions that treat the first argument specially. a_cmp must be a total | ||
| 304 | * order on values inserted into the tree -- duplicates are not allowed. | ||
| 305 | * | ||
| 306 | * Assuming the following setup: | ||
| 307 | * | ||
| 308 | * typedef struct ex_node_s ex_node_t; | ||
| 309 | * struct ex_node_s { | ||
| 310 | * rb_node(ex_node_t) ex_link; | ||
| 311 | * }; | ||
| 312 | * typedef rb_tree(ex_node_t) ex_t; | ||
| 313 | * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) | ||
| 314 | * | ||
| 315 | * The following API is generated: | ||
| 316 | * | ||
| 317 | * static void | ||
| 318 | * ex_new(ex_t *tree); | ||
| 319 | * Description: Initialize a red-black tree structure. | ||
| 320 | * Args: | ||
| 321 | * tree: Pointer to an uninitialized red-black tree object. | ||
| 322 | * | ||
| 323 | * static bool | ||
| 324 | * ex_empty(ex_t *tree); | ||
| 325 | * Description: Determine whether tree is empty. | ||
| 326 | * Args: | ||
| 327 | * tree: Pointer to an initialized red-black tree object. | ||
| 328 | * Ret: True if tree is empty, false otherwise. | ||
| 329 | * | ||
| 330 | * static ex_node_t * | ||
| 331 | * ex_first(ex_t *tree); | ||
| 332 | * static ex_node_t * | ||
| 333 | * ex_last(ex_t *tree); | ||
| 334 | * Description: Get the first/last node in tree. | ||
| 335 | * Args: | ||
| 336 | * tree: Pointer to an initialized red-black tree object. | ||
| 337 | * Ret: First/last node in tree, or NULL if tree is empty. | ||
| 338 | * | ||
| 339 | * static ex_node_t * | ||
| 340 | * ex_next(ex_t *tree, ex_node_t *node); | ||
| 341 | * static ex_node_t * | ||
| 342 | * ex_prev(ex_t *tree, ex_node_t *node); | ||
| 343 | * Description: Get node's successor/predecessor. | ||
| 344 | * Args: | ||
| 345 | * tree: Pointer to an initialized red-black tree object. | ||
| 346 | * node: A node in tree. | ||
| 347 | * Ret: node's successor/predecessor in tree, or NULL if node is | ||
| 348 | * last/first. | ||
| 349 | * | ||
| 350 | * static ex_node_t * | ||
| 351 | * ex_search(ex_t *tree, const ex_node_t *key); | ||
| 352 | * Description: Search for node that matches key. | ||
| 353 | * Args: | ||
| 354 | * tree: Pointer to an initialized red-black tree object. | ||
| 355 | * key : Search key. | ||
| 356 | * Ret: Node in tree that matches key, or NULL if no match. | ||
| 357 | * | ||
| 358 | * static ex_node_t * | ||
| 359 | * ex_nsearch(ex_t *tree, const ex_node_t *key); | ||
| 360 | * static ex_node_t * | ||
| 361 | * ex_psearch(ex_t *tree, const ex_node_t *key); | ||
| 362 | * Description: Search for node that matches key. If no match is found, | ||
| 363 | * return what would be key's successor/predecessor, were | ||
| 364 | * key in tree. | ||
| 365 | * Args: | ||
| 366 | * tree: Pointer to an initialized red-black tree object. | ||
| 367 | * key : Search key. | ||
| 368 | * Ret: Node in tree that matches key, or if no match, hypothetical node's | ||
| 369 | * successor/predecessor (NULL if no successor/predecessor). | ||
| 370 | * | ||
| 371 | * static void | ||
| 372 | * ex_insert(ex_t *tree, ex_node_t *node); | ||
| 373 | * Description: Insert node into tree. | ||
| 374 | * Args: | ||
| 375 | * tree: Pointer to an initialized red-black tree object. | ||
| 376 | * node: Node to be inserted into tree. | ||
| 377 | * | ||
| 378 | * static void | ||
| 379 | * ex_remove(ex_t *tree, ex_node_t *node); | ||
| 380 | * Description: Remove node from tree. | ||
| 381 | * Args: | ||
| 382 | * tree: Pointer to an initialized red-black tree object. | ||
| 383 | * node: Node in tree to be removed. | ||
| 384 | * | ||
| 385 | * static ex_node_t * | ||
| 386 | * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, | ||
| 387 | * ex_node_t *, void *), void *arg); | ||
| 388 | * static ex_node_t * | ||
| 389 | * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, | ||
| 390 | * ex_node_t *, void *), void *arg); | ||
| 391 | * Description: Iterate forward/backward over tree, starting at node. If | ||
| 392 | * tree is modified, iteration must be immediately | ||
| 393 | * terminated by the callback function that causes the | ||
| 394 | * modification. | ||
| 395 | * Args: | ||
| 396 | * tree : Pointer to an initialized red-black tree object. | ||
| 397 | * start: Node at which to start iteration, or NULL to start at | ||
| 398 | * first/last node. | ||
| 399 | * cb : Callback function, which is called for each node during | ||
| 400 | * iteration. Under normal circumstances the callback function | ||
| 401 | * should return NULL, which causes iteration to continue. If a | ||
| 402 | * callback function returns non-NULL, iteration is immediately | ||
| 403 | * terminated and the non-NULL return value is returned by the | ||
| 404 | * iterator. This is useful for re-starting iteration after | ||
| 405 | * modifying tree. | ||
| 406 | * arg : Opaque pointer passed to cb(). | ||
| 407 | * Ret: NULL if iteration completed, or the non-NULL callback return value | ||
| 408 | * that caused termination of the iteration. | ||
| 409 | * | ||
| 410 | * static void | ||
| 411 | * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg); | ||
| 412 | * Description: Iterate over the tree with post-order traversal, remove | ||
| 413 | * each node, and run the callback if non-null. This is | ||
| 414 | * used for destroying a tree without paying the cost to | ||
| 415 | * rebalance it. The tree must not be otherwise altered | ||
| 416 | * during traversal. | ||
| 417 | * Args: | ||
| 418 | * tree: Pointer to an initialized red-black tree object. | ||
| 419 | * cb : Callback function, which, if non-null, is called for each node | ||
| 420 | * during iteration. There is no way to stop iteration once it | ||
| 421 | * has begun. | ||
| 422 | * arg : Opaque pointer passed to cb(). | ||
| 423 | * | ||
| 424 | * The rb_summarized_gen() macro generates all the functions above, but has an | ||
| 425 | * expanded interface. It introduces the notion of summarizing subtrees, and of | ||
| 426 | * filtering searches in the tree according to the information contained in | ||
| 427 | * those summaries. | ||
| 428 | * The extra macro argument is: | ||
| 429 | * a_summarize: | ||
| 430 | * Tree summarization function name, with the following prototype: | ||
| 431 | * | ||
| 432 | * bool a_summarize(a_type *a_node, const a_type *a_left_child, | ||
| 433 | * const a_type *a_right_child); | ||
| 434 | * | ||
| 435 | * This function should update a_node with the summary of the subtree rooted | ||
| 436 | * there, using the data contained in it and the summaries in a_left_child | ||
| 437 | * and a_right_child. One or both of them may be NULL. When the tree | ||
| 438 | * changes due to an insertion or removal, the implementation updates the summaries of all | ||
| 439 | * nodes whose subtrees have changed (always updating the summaries of | ||
| 440 | * children before their parents). If the user alters a node in the tree in | ||
| 441 | * a way that may change its summary, they can call the generated | ||
| 442 | * update_summaries function to bubble up the summary changes to the root. | ||
| 443 | * It should return true if the summary changed (or may have changed), and | ||
| 444 | * false if it didn't (which will allow the implementation to terminate | ||
| 445 | * "bubbling up" the summaries early). | ||
| 446 | * As the parameter names indicate, the children are ordered as they are in | ||
| 447 | * the tree, a_left_child, if it is not NULL, compares less than a_node, | ||
| 448 | * which in turn compares less than a_right_child (if a_right_child is not | ||
| 449 | * NULL). | ||
| 450 | * | ||
| 451 | * Using the same setup as above but replacing the macro with | ||
| 452 | * rb_summarized_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp, | ||
| 453 | * ex_summarize) | ||
| 454 | * | ||
| 455 | * generates all the previous functions, but adds some more: | ||
| 456 | * | ||
| 457 | * static void | ||
| 458 | * ex_update_summaries(ex_t *tree, ex_node_t *node); | ||
| 459 | * Description: Recompute all summaries of ancestors of node. | ||
| 460 | * Args: | ||
| 461 | * tree: Pointer to an initialized red-black tree object. | ||
| 462 | * node: The element of the tree whose summary may have changed. | ||
| 463 | * | ||
| 464 | * For each of ex_empty, ex_first, ex_last, ex_next, ex_prev, ex_search, | ||
| 465 | * ex_nsearch, ex_psearch, ex_iter, and ex_reverse_iter, an additional function | ||
| 466 | * is generated as well, with the suffix _filtered (e.g. ex_empty_filtered, | ||
| 467 | * ex_first_filtered, etc.). These use the concept of a "filter": a binary | ||
| 468 | * property some node either satisfies or does not satisfy. Clever use of the | ||
| 469 | * a_summarize argument to rb_summarized_gen can allow efficient computation of | ||
| 470 | * these predicates across whole subtrees of the tree. | ||
| 471 | * The extended API functions accept three additional arguments after the | ||
| 472 | * arguments to the corresponding non-extended equivalent. | ||
| 473 | * | ||
| 474 | * ex_fn(..., bool (*filter_node)(void *, ex_node_t *), | ||
| 475 | * bool (*filter_subtree)(void *, ex_node_t *), void *filter_ctx); | ||
| 476 | * filter_node : Returns true if the node passes the filter. | ||
| 477 | * filter_subtree : Returns true if some node in the subtree rooted at | ||
| 478 | * node passes the filter. | ||
| 479 | * filter_ctx : A context argument passed to the filters. | ||
| 480 | * | ||
| 481 | * For a more concrete example of summarizing and filtering, suppose we're using | ||
| 482 | * the red-black tree to track a set of integers: | ||
| 483 | * | ||
| 484 | * struct ex_node_s { | ||
| 485 | * rb_node(ex_node_t) ex_link; | ||
| 486 | * unsigned data; | ||
| 487 | * }; | ||
| 488 | * | ||
| 489 | * Suppose, for some application-specific reason, we want to be able to quickly | ||
| 490 | * find numbers in the set which are divisible by large powers of 2 (say, for | ||
| 491 | * aligned allocation purposes). We augment the node with a summary field: | ||
| 492 | * | ||
| 493 | * struct ex_node_s { | ||
| 494 | * rb_node(ex_node_t) ex_link; | ||
| 495 | * unsigned data; | ||
| 496 | * unsigned max_subtree_ffs; | ||
| 497 | * }; | ||
| 498 | * | ||
| 499 | * and define our summarization function as follows: | ||
| 500 | * | ||
| 501 | * bool | ||
| 502 | * ex_summarize(ex_node_t *node, const ex_node_t *lchild, | ||
| 503 | * const ex_node_t *rchild) { | ||
| 504 | * unsigned new_max_subtree_ffs = ffs(node->data); | ||
| 505 | * if (lchild != NULL && lchild->max_subtree_ffs > new_max_subtree_ffs) { | ||
| 506 | * new_max_subtree_ffs = lchild->max_subtree_ffs; | ||
| 507 | * } | ||
| 508 | * if (rchild != NULL && rchild->max_subtree_ffs > new_max_subtree_ffs) { | ||
| 509 | * new_max_subtree_ffs = rchild->max_subtree_ffs; | ||
| 510 | * } | ||
| 511 | * bool changed = (node->max_subtree_ffs != new_max_subtree_ffs); | ||
| 512 | * node->max_subtree_ffs = new_max_subtree_ffs; | ||
| 513 | * // This could be "return true" without any correctness or big-O | ||
| 514 | * // performance changes; but practically, precisely reporting summary | ||
| 515 | * // changes reduces the amount of work that has to be done when "bubbling | ||
| 516 | * // up" summary changes. | ||
| 517 | * return changed; | ||
| 518 | * } | ||
| 519 | * | ||
| 520 | * We can now implement our filter functions as follows: | ||
| 521 | * bool | ||
| 522 | * ex_filter_node(void *filter_ctx, ex_node_t *node) { | ||
| 523 | * unsigned required_ffs = *(unsigned *)filter_ctx; | ||
| 524 | * return ffs(node->data) >= required_ffs; | ||
| 525 | * } | ||
| 526 | * bool | ||
| 527 | * ex_filter_subtree(void *filter_ctx, ex_node_t *node) { | ||
| 528 | * unsigned required_ffs = *(unsigned *)filter_ctx; | ||
| 529 | * return node->max_subtree_ffs >= required_ffs; | ||
| 530 | * } | ||
| 531 | * | ||
| 532 | * We can now easily search for, e.g., the smallest integer in the set that's | ||
| 533 | * divisible by 128: | ||
| 534 | * ex_node_t * | ||
| 535 | * find_div_128(ex_t *tree) { | ||
| 536 | * unsigned min_ffs = 8; | ||
| 537 | * return ex_first_filtered(tree, &ex_filter_node, &ex_filter_subtree, | ||
| 538 | * &min_ffs); | ||
| 539 | * } | ||
| 540 | * | ||
| 541 | * We could with similar ease: | ||
| 542 | * - Find the next multiple of 128 in the set that's larger than 12345 (with | ||
| 543 | * ex_nsearch_filtered) | ||
| 544 | * - Iterate over just those multiples of 64 that are in the set (with | ||
| 545 | * ex_iter_filtered) | ||
| 546 | * - Determine if the set contains any multiples of 1024 (with | ||
| 547 | * ex_empty_filtered). | ||
| 548 | * | ||
| 549 | * Some possibly subtle API notes: | ||
| 550 | * - The node argument to ex_next_filtered and ex_prev_filtered need not pass | ||
| 551 | * the filter; it will find the next/prev node that passes the filter. | ||
| 552 | * - ex_search_filtered will fail even for a node in the tree, if that node does | ||
| 553 | * not pass the filter. ex_psearch_filtered and ex_nsearch_filtered behave | ||
| 554 | * similarly; they may return a node larger/smaller than the key, even if a | ||
| 555 | * node equivalent to the key is in the tree (but does not pass the filter). | ||
| 556 | * - Similarly, if the start argument to a filtered iteration function does not | ||
| 557 | * pass the filter, the callback won't be invoked on it. | ||
| 558 | * | ||
| 559 | * These should make sense after a moment's reflection; each post-condition is | ||
| 560 | * the same as with the unfiltered version, with the added constraint that the | ||
| 561 | * returned node must pass the filter. | ||
| 562 | */ | ||
| 563 | #define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ | ||
| 564 | rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \ | ||
| 565 | rb_empty_summarize, false) | ||
| 566 | #define rb_summarized_gen(a_attr, a_prefix, a_rbt_type, a_type, \ | ||
| 567 | a_field, a_cmp, a_summarize) \ | ||
| 568 | rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \ | ||
| 569 | a_summarize, true) | ||
| 570 | |||
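A compact usage sketch of the generated API, following the ex_ example documented above (the key field, the ex_cmp body, and ex_usage are illustrative assumptions, not part of rb.h):

/* Illustrative only. */
typedef struct ex_node_s ex_node_t;
struct ex_node_s {
    rb_node(ex_node_t) ex_link; /* intrusive linkage */
    int key;                    /* example payload */
};
typedef rb_tree(ex_node_t) ex_t;

static int
ex_cmp(const ex_node_t *a, const ex_node_t *b) {
    return (a->key > b->key) - (a->key < b->key);
}
rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)

static void
ex_usage(void) {
    ex_t tree;
    ex_node_t a = {0}, b = {0}, key = {0};
    a.key = 1;
    b.key = 2;
    key.key = 2;
    ex_new(&tree);
    ex_insert(&tree, &a);
    ex_insert(&tree, &b);
    ex_node_t *hit = ex_search(&tree, &key);  /* == &b */
    ex_node_t *succ = ex_next(&tree, &a);     /* == &b */
    (void)hit; (void)succ;
    ex_remove(&tree, &b);
    ex_remove(&tree, &a);
}

The documented a_cmp prototype uses non-const node pointers; the const-qualified parameters here are a choice that also accommodates the const a_type *key arguments the generated search functions pass.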
| 571 | #define rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, \ | ||
| 572 | a_field, a_cmp, a_summarize, a_is_summarized) \ | ||
| 573 | typedef struct { \ | ||
| 574 | a_type *node; \ | ||
| 575 | int cmp; \ | ||
| 576 | } a_prefix##path_entry_t; \ | ||
| 577 | static inline void \ | ||
| 578 | a_prefix##summarize_range(a_prefix##path_entry_t *rfirst, \ | ||
| 579 | a_prefix##path_entry_t *rlast) { \ | ||
| 580 | while ((uintptr_t)rlast >= (uintptr_t)rfirst) { \ | ||
| 581 | a_type *node = rlast->node; \ | ||
| 582 | /* Avoid a warning when a_summarize is rb_empty_summarize. */ \ | ||
| 583 | (void)node; \ | ||
| 584 | bool changed = a_summarize(node, rbtn_left_get(a_type, a_field, \ | ||
| 585 | node), rbtn_right_get(a_type, a_field, node)); \ | ||
| 586 | if (!changed) { \ | ||
| 587 | break; \ | ||
| 588 | } \ | ||
| 589 | rlast--; \ | ||
| 590 | } \ | ||
| 591 | } \ | ||
| 592 | /* On the remove pathways, we sometimes swap the node being removed */\ | ||
| 593 | /* and its first successor; in such cases we need to do two range */\ | ||
| 594 | /* updates; one from the node to its (former) swapped successor, the */\ | ||
| 595 | /* next from that successor to the root (with either allowed to */\ | ||
| 596 | /* bail out early if appropriate). */\ | ||
| 597 | static inline void \ | ||
| 598 | a_prefix##summarize_swapped_range(a_prefix##path_entry_t *rfirst, \ | ||
| 599 | a_prefix##path_entry_t *rlast, a_prefix##path_entry_t *swap_loc) { \ | ||
| 600 | if (swap_loc == NULL || rlast <= swap_loc) { \ | ||
| 601 | a_prefix##summarize_range(rfirst, rlast); \ | ||
| 602 | } else { \ | ||
| 603 | a_prefix##summarize_range(swap_loc + 1, rlast); \ | ||
| 604 | (void)a_summarize(swap_loc->node, \ | ||
| 605 | rbtn_left_get(a_type, a_field, swap_loc->node), \ | ||
| 606 | rbtn_right_get(a_type, a_field, swap_loc->node)); \ | ||
| 607 | a_prefix##summarize_range(rfirst, swap_loc - 1); \ | ||
| 608 | } \ | ||
| 609 | } \ | ||
| 610 | a_attr void \ | ||
| 611 | a_prefix##new(a_rbt_type *rbtree) { \ | ||
| 612 | rb_new(a_type, a_field, rbtree); \ | ||
| 613 | } \ | ||
| 614 | a_attr bool \ | ||
| 615 | a_prefix##empty(a_rbt_type *rbtree) { \ | ||
| 616 | return (rbtree->rbt_root == NULL); \ | ||
| 617 | } \ | ||
| 618 | a_attr a_type * \ | ||
| 619 | a_prefix##first(a_rbt_type *rbtree) { \ | ||
| 620 | a_type *ret; \ | ||
| 621 | rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ | ||
| 622 | return ret; \ | ||
| 623 | } \ | ||
| 624 | a_attr a_type * \ | ||
| 625 | a_prefix##last(a_rbt_type *rbtree) { \ | ||
| 626 | a_type *ret; \ | ||
| 627 | rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ | ||
| 628 | return ret; \ | ||
| 629 | } \ | ||
| 630 | a_attr a_type * \ | ||
| 631 | a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ | ||
| 632 | a_type *ret; \ | ||
| 633 | if (rbtn_right_get(a_type, a_field, node) != NULL) { \ | ||
| 634 | rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ | ||
| 635 | a_field, node), ret); \ | ||
| 636 | } else { \ | ||
| 637 | a_type *tnode = rbtree->rbt_root; \ | ||
| 638 | assert(tnode != NULL); \ | ||
| 639 | ret = NULL; \ | ||
| 640 | while (true) { \ | ||
| 641 | int cmp = (a_cmp)(node, tnode); \ | ||
| 642 | if (cmp < 0) { \ | ||
| 643 | ret = tnode; \ | ||
| 644 | tnode = rbtn_left_get(a_type, a_field, tnode); \ | ||
| 645 | } else if (cmp > 0) { \ | ||
| 646 | tnode = rbtn_right_get(a_type, a_field, tnode); \ | ||
| 647 | } else { \ | ||
| 648 | break; \ | ||
| 649 | } \ | ||
| 650 | assert(tnode != NULL); \ | ||
| 651 | } \ | ||
| 652 | } \ | ||
| 653 | return ret; \ | ||
| 654 | } \ | ||
| 655 | a_attr a_type * \ | ||
| 656 | a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ | ||
| 657 | a_type *ret; \ | ||
| 658 | if (rbtn_left_get(a_type, a_field, node) != NULL) { \ | ||
| 659 | rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ | ||
| 660 | a_field, node), ret); \ | ||
| 661 | } else { \ | ||
| 662 | a_type *tnode = rbtree->rbt_root; \ | ||
| 663 | assert(tnode != NULL); \ | ||
| 664 | ret = NULL; \ | ||
| 665 | while (true) { \ | ||
| 666 | int cmp = (a_cmp)(node, tnode); \ | ||
| 667 | if (cmp < 0) { \ | ||
| 668 | tnode = rbtn_left_get(a_type, a_field, tnode); \ | ||
| 669 | } else if (cmp > 0) { \ | ||
| 670 | ret = tnode; \ | ||
| 671 | tnode = rbtn_right_get(a_type, a_field, tnode); \ | ||
| 672 | } else { \ | ||
| 673 | break; \ | ||
| 674 | } \ | ||
| 675 | assert(tnode != NULL); \ | ||
| 676 | } \ | ||
| 677 | } \ | ||
| 678 | return ret; \ | ||
| 679 | } \ | ||
| 680 | a_attr a_type * \ | ||
| 681 | a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \ | ||
| 682 | a_type *ret; \ | ||
| 683 | int cmp; \ | ||
| 684 | ret = rbtree->rbt_root; \ | ||
| 685 | while (ret != NULL \ | ||
| 686 | && (cmp = (a_cmp)(key, ret)) != 0) { \ | ||
| 687 | if (cmp < 0) { \ | ||
| 688 | ret = rbtn_left_get(a_type, a_field, ret); \ | ||
| 689 | } else { \ | ||
| 690 | ret = rbtn_right_get(a_type, a_field, ret); \ | ||
| 691 | } \ | ||
| 692 | } \ | ||
| 693 | return ret; \ | ||
| 694 | } \ | ||
| 695 | a_attr a_type * \ | ||
| 696 | a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \ | ||
| 697 | a_type *ret; \ | ||
| 698 | a_type *tnode = rbtree->rbt_root; \ | ||
| 699 | ret = NULL; \ | ||
| 700 | while (tnode != NULL) { \ | ||
| 701 | int cmp = (a_cmp)(key, tnode); \ | ||
| 702 | if (cmp < 0) { \ | ||
| 703 | ret = tnode; \ | ||
| 704 | tnode = rbtn_left_get(a_type, a_field, tnode); \ | ||
| 705 | } else if (cmp > 0) { \ | ||
| 706 | tnode = rbtn_right_get(a_type, a_field, tnode); \ | ||
| 707 | } else { \ | ||
| 708 | ret = tnode; \ | ||
| 709 | break; \ | ||
| 710 | } \ | ||
| 711 | } \ | ||
| 712 | return ret; \ | ||
| 713 | } \ | ||
| 714 | a_attr a_type * \ | ||
| 715 | a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \ | ||
| 716 | a_type *ret; \ | ||
| 717 | a_type *tnode = rbtree->rbt_root; \ | ||
| 718 | ret = NULL; \ | ||
| 719 | while (tnode != NULL) { \ | ||
| 720 | int cmp = (a_cmp)(key, tnode); \ | ||
| 721 | if (cmp < 0) { \ | ||
| 722 | tnode = rbtn_left_get(a_type, a_field, tnode); \ | ||
| 723 | } else if (cmp > 0) { \ | ||
| 724 | ret = tnode; \ | ||
| 725 | tnode = rbtn_right_get(a_type, a_field, tnode); \ | ||
| 726 | } else { \ | ||
| 727 | ret = tnode; \ | ||
| 728 | break; \ | ||
| 729 | } \ | ||
| 730 | } \ | ||
| 731 | return ret; \ | ||
| 732 | } \ | ||
| 733 | a_attr void \ | ||
| 734 | a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ | ||
| 735 | a_prefix##path_entry_t path[RB_MAX_DEPTH]; \ | ||
| 736 | a_prefix##path_entry_t *pathp; \ | ||
| 737 | rbt_node_new(a_type, a_field, rbtree, node); \ | ||
| 738 | /* Wind. */ \ | ||
| 739 | path->node = rbtree->rbt_root; \ | ||
| 740 | for (pathp = path; pathp->node != NULL; pathp++) { \ | ||
| 741 | int cmp = pathp->cmp = a_cmp(node, pathp->node); \ | ||
| 742 | assert(cmp != 0); \ | ||
| 743 | if (cmp < 0) { \ | ||
| 744 | pathp[1].node = rbtn_left_get(a_type, a_field, \ | ||
| 745 | pathp->node); \ | ||
| 746 | } else { \ | ||
| 747 | pathp[1].node = rbtn_right_get(a_type, a_field, \ | ||
| 748 | pathp->node); \ | ||
| 749 | } \ | ||
| 750 | } \ | ||
| 751 | pathp->node = node; \ | ||
| 752 | /* A loop invariant we maintain is that all nodes with */\ | ||
| 753 | /* out-of-date summaries live in path[0], path[1], ..., *pathp. */\ | ||
| 754 | /* To maintain this, we have to summarize node, since we */\ | ||
| 755 | /* decrement pathp before the first iteration. */\ | ||
| 756 | assert(rbtn_left_get(a_type, a_field, node) == NULL); \ | ||
| 757 | assert(rbtn_right_get(a_type, a_field, node) == NULL); \ | ||
| 758 | (void)a_summarize(node, NULL, NULL); \ | ||
| 759 | /* Unwind. */ \ | ||
| 760 | for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ | ||
| 761 | a_type *cnode = pathp->node; \ | ||
| 762 | if (pathp->cmp < 0) { \ | ||
| 763 | a_type *left = pathp[1].node; \ | ||
| 764 | rbtn_left_set(a_type, a_field, cnode, left); \ | ||
| 765 | if (rbtn_red_get(a_type, a_field, left)) { \ | ||
| 766 | a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ | ||
| 767 | if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ | ||
| 768 | leftleft)) { \ | ||
| 769 | /* Fix up 4-node. */ \ | ||
| 770 | a_type *tnode; \ | ||
| 771 | rbtn_black_set(a_type, a_field, leftleft); \ | ||
| 772 | rbtn_rotate_right(a_type, a_field, cnode, tnode); \ | ||
| 773 | (void)a_summarize(cnode, \ | ||
| 774 | rbtn_left_get(a_type, a_field, cnode), \ | ||
| 775 | rbtn_right_get(a_type, a_field, cnode)); \ | ||
| 776 | cnode = tnode; \ | ||
| 777 | } \ | ||
| 778 | } else { \ | ||
| 779 | a_prefix##summarize_range(path, pathp); \ | ||
| 780 | return; \ | ||
| 781 | } \ | ||
| 782 | } else { \ | ||
| 783 | a_type *right = pathp[1].node; \ | ||
| 784 | rbtn_right_set(a_type, a_field, cnode, right); \ | ||
| 785 | if (rbtn_red_get(a_type, a_field, right)) { \ | ||
| 786 | a_type *left = rbtn_left_get(a_type, a_field, cnode); \ | ||
| 787 | if (left != NULL && rbtn_red_get(a_type, a_field, \ | ||
| 788 | left)) { \ | ||
| 789 | /* Split 4-node. */ \ | ||
| 790 | rbtn_black_set(a_type, a_field, left); \ | ||
| 791 | rbtn_black_set(a_type, a_field, right); \ | ||
| 792 | rbtn_red_set(a_type, a_field, cnode); \ | ||
| 793 | } else { \ | ||
| 794 | /* Lean left. */ \ | ||
| 795 | a_type *tnode; \ | ||
| 796 | bool tred = rbtn_red_get(a_type, a_field, cnode); \ | ||
| 797 | rbtn_rotate_left(a_type, a_field, cnode, tnode); \ | ||
| 798 | rbtn_color_set(a_type, a_field, tnode, tred); \ | ||
| 799 | rbtn_red_set(a_type, a_field, cnode); \ | ||
| 800 | (void)a_summarize(cnode, \ | ||
| 801 | rbtn_left_get(a_type, a_field, cnode), \ | ||
| 802 | rbtn_right_get(a_type, a_field, cnode)); \ | ||
| 803 | cnode = tnode; \ | ||
| 804 | } \ | ||
| 805 | } else { \ | ||
| 806 | a_prefix##summarize_range(path, pathp); \ | ||
| 807 | return; \ | ||
| 808 | } \ | ||
| 809 | } \ | ||
| 810 | pathp->node = cnode; \ | ||
| 811 | (void)a_summarize(cnode, \ | ||
| 812 | rbtn_left_get(a_type, a_field, cnode), \ | ||
| 813 | rbtn_right_get(a_type, a_field, cnode)); \ | ||
| 814 | } \ | ||
| 815 | /* Set root, and make it black. */ \ | ||
| 816 | rbtree->rbt_root = path->node; \ | ||
| 817 | rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ | ||
| 818 | } \ | ||
| 819 | a_attr void \ | ||
| 820 | a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ | ||
| 821 | a_prefix##path_entry_t path[RB_MAX_DEPTH]; \ | ||
| 822 | a_prefix##path_entry_t *pathp; \ | ||
| 823 | a_prefix##path_entry_t *nodep; \ | ||
| 824 | a_prefix##path_entry_t *swap_loc; \ | ||
| 825 | /* This is a "real" sentinel -- NULL means we didn't swap the */\ | ||
| 826 | /* node to be pruned with one of its successors, and so */\ | ||
| 827 | /* summarization can terminate early whenever some summary */\ | ||
| 828 | /* doesn't change. */\ | ||
| 829 | swap_loc = NULL; \ | ||
| 830 | /* This is just to silence a compiler warning. */ \ | ||
| 831 | nodep = NULL; \ | ||
| 832 | /* Wind. */ \ | ||
| 833 | path->node = rbtree->rbt_root; \ | ||
| 834 | for (pathp = path; pathp->node != NULL; pathp++) { \ | ||
| 835 | int cmp = pathp->cmp = a_cmp(node, pathp->node); \ | ||
| 836 | if (cmp < 0) { \ | ||
| 837 | pathp[1].node = rbtn_left_get(a_type, a_field, \ | ||
| 838 | pathp->node); \ | ||
| 839 | } else { \ | ||
| 840 | pathp[1].node = rbtn_right_get(a_type, a_field, \ | ||
| 841 | pathp->node); \ | ||
| 842 | if (cmp == 0) { \ | ||
| 843 | /* Find node's successor, in preparation for swap. */ \ | ||
| 844 | pathp->cmp = 1; \ | ||
| 845 | nodep = pathp; \ | ||
| 846 | for (pathp++; pathp->node != NULL; pathp++) { \ | ||
| 847 | pathp->cmp = -1; \ | ||
| 848 | pathp[1].node = rbtn_left_get(a_type, a_field, \ | ||
| 849 | pathp->node); \ | ||
| 850 | } \ | ||
| 851 | break; \ | ||
| 852 | } \ | ||
| 853 | } \ | ||
| 854 | } \ | ||
| 855 | assert(nodep->node == node); \ | ||
| 856 | pathp--; \ | ||
| 857 | if (pathp->node != node) { \ | ||
| 858 | /* Swap node with its successor. */ \ | ||
| 859 | swap_loc = nodep; \ | ||
| 860 | bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ | ||
| 861 | rbtn_color_set(a_type, a_field, pathp->node, \ | ||
| 862 | rbtn_red_get(a_type, a_field, node)); \ | ||
| 863 | rbtn_left_set(a_type, a_field, pathp->node, \ | ||
| 864 | rbtn_left_get(a_type, a_field, node)); \ | ||
| 865 | /* If node's successor is its right child, the following code */\ | ||
| 866 | /* will do the wrong thing for the right child pointer. */\ | ||
| 867 | /* However, it doesn't matter, because the pointer will be */\ | ||
| 868 | /* properly set when the successor is pruned. */\ | ||
| 869 | rbtn_right_set(a_type, a_field, pathp->node, \ | ||
| 870 | rbtn_right_get(a_type, a_field, node)); \ | ||
| 871 | rbtn_color_set(a_type, a_field, node, tred); \ | ||
| 872 | /* The pruned leaf node's child pointers are never accessed */\ | ||
| 873 | /* again, so don't bother setting them to nil. */\ | ||
| 874 | nodep->node = pathp->node; \ | ||
| 875 | pathp->node = node; \ | ||
| 876 | if (nodep == path) { \ | ||
| 877 | rbtree->rbt_root = nodep->node; \ | ||
| 878 | } else { \ | ||
| 879 | if (nodep[-1].cmp < 0) { \ | ||
| 880 | rbtn_left_set(a_type, a_field, nodep[-1].node, \ | ||
| 881 | nodep->node); \ | ||
| 882 | } else { \ | ||
| 883 | rbtn_right_set(a_type, a_field, nodep[-1].node, \ | ||
| 884 | nodep->node); \ | ||
| 885 | } \ | ||
| 886 | } \ | ||
| 887 | } else { \ | ||
| 888 | a_type *left = rbtn_left_get(a_type, a_field, node); \ | ||
| 889 | if (left != NULL) { \ | ||
| 890 | /* node has no successor, but it has a left child. */\ | ||
| 891 | /* Splice node out, without losing the left child. */\ | ||
| 892 | assert(!rbtn_red_get(a_type, a_field, node)); \ | ||
| 893 | assert(rbtn_red_get(a_type, a_field, left)); \ | ||
| 894 | rbtn_black_set(a_type, a_field, left); \ | ||
| 895 | if (pathp == path) { \ | ||
| 896 | rbtree->rbt_root = left; \ | ||
| 897 | /* Nothing to summarize -- the subtree rooted at the */\ | ||
| 898 | /* node's left child hasn't changed, and it's now the */\ | ||
| 899 | /* root. */\ | ||
| 900 | } else { \ | ||
| 901 | if (pathp[-1].cmp < 0) { \ | ||
| 902 | rbtn_left_set(a_type, a_field, pathp[-1].node, \ | ||
| 903 | left); \ | ||
| 904 | } else { \ | ||
| 905 | rbtn_right_set(a_type, a_field, pathp[-1].node, \ | ||
| 906 | left); \ | ||
| 907 | } \ | ||
| 908 | a_prefix##summarize_swapped_range(path, &pathp[-1], \ | ||
| 909 | swap_loc); \ | ||
| 910 | } \ | ||
| 911 | return; \ | ||
| 912 | } else if (pathp == path) { \ | ||
| 913 | /* The tree only contained one node. */ \ | ||
| 914 | rbtree->rbt_root = NULL; \ | ||
| 915 | return; \ | ||
| 916 | } \ | ||
| 917 | } \ | ||
| 918 | /* We've now established the invariant that the node has no right */\ | ||
| 919 | /* child (well, morally; we didn't bother nulling it out if we */\ | ||
| 920 | /* swapped it with its successor), and that the only nodes with */\ | ||
| 921 | /* out-of-date summaries live in path[0], path[1], ..., pathp[-1].*/\ | ||
| 922 | if (rbtn_red_get(a_type, a_field, pathp->node)) { \ | ||
| 923 | /* Prune red node, which requires no fixup. */ \ | ||
| 924 | assert(pathp[-1].cmp < 0); \ | ||
| 925 | rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ | ||
| 926 | a_prefix##summarize_swapped_range(path, &pathp[-1], swap_loc); \ | ||
| 927 | return; \ | ||
| 928 | } \ | ||
| 929 | /* The node to be pruned is black, so unwind until balance is */\ | ||
| 930 | /* restored. */\ | ||
| 931 | pathp->node = NULL; \ | ||
| 932 | for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ | ||
| 933 | assert(pathp->cmp != 0); \ | ||
| 934 | if (pathp->cmp < 0) { \ | ||
| 935 | rbtn_left_set(a_type, a_field, pathp->node, \ | ||
| 936 | pathp[1].node); \ | ||
| 937 | if (rbtn_red_get(a_type, a_field, pathp->node)) { \ | ||
| 938 | a_type *right = rbtn_right_get(a_type, a_field, \ | ||
| 939 | pathp->node); \ | ||
| 940 | a_type *rightleft = rbtn_left_get(a_type, a_field, \ | ||
| 941 | right); \ | ||
| 942 | a_type *tnode; \ | ||
| 943 | if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ | ||
| 944 | rightleft)) { \ | ||
| 945 | /* In the following diagrams, ||, //, and \\ */\ | ||
| 946 | /* indicate the path to the removed node. */\ | ||
| 947 | /* */\ | ||
| 948 | /* || */\ | ||
| 949 | /* pathp(r) */\ | ||
| 950 | /* // \ */\ | ||
| 951 | /* (b) (b) */\ | ||
| 952 | /* / */\ | ||
| 953 | /* (r) */\ | ||
| 954 | /* */\ | ||
| 955 | rbtn_black_set(a_type, a_field, pathp->node); \ | ||
| 956 | rbtn_rotate_right(a_type, a_field, right, tnode); \ | ||
| 957 | rbtn_right_set(a_type, a_field, pathp->node, tnode);\ | ||
| 958 | rbtn_rotate_left(a_type, a_field, pathp->node, \ | ||
| 959 | tnode); \ | ||
| 960 | (void)a_summarize(pathp->node, \ | ||
| 961 | rbtn_left_get(a_type, a_field, pathp->node), \ | ||
| 962 | rbtn_right_get(a_type, a_field, pathp->node)); \ | ||
| 963 | (void)a_summarize(right, \ | ||
| 964 | rbtn_left_get(a_type, a_field, right), \ | ||
| 965 | rbtn_right_get(a_type, a_field, right)); \ | ||
| 966 | } else { \ | ||
| 967 | /* || */\ | ||
| 968 | /* pathp(r) */\ | ||
| 969 | /* // \ */\ | ||
| 970 | /* (b) (b) */\ | ||
| 971 | /* / */\ | ||
| 972 | /* (b) */\ | ||
| 973 | /* */\ | ||
| 974 | rbtn_rotate_left(a_type, a_field, pathp->node, \ | ||
| 975 | tnode); \ | ||
| 976 | (void)a_summarize(pathp->node, \ | ||
| 977 | rbtn_left_get(a_type, a_field, pathp->node), \ | ||
| 978 | rbtn_right_get(a_type, a_field, pathp->node)); \ | ||
| 979 | } \ | ||
| 980 | (void)a_summarize(tnode, rbtn_left_get(a_type, a_field, \ | ||
| 981 | tnode), rbtn_right_get(a_type, a_field, tnode)); \ | ||
| 982 | /* Balance restored, but rotation modified subtree */\ | ||
| 983 | /* root. */\ | ||
| 984 | assert((uintptr_t)pathp > (uintptr_t)path); \ | ||
| 985 | if (pathp[-1].cmp < 0) { \ | ||
| 986 | rbtn_left_set(a_type, a_field, pathp[-1].node, \ | ||
| 987 | tnode); \ | ||
| 988 | } else { \ | ||
| 989 | rbtn_right_set(a_type, a_field, pathp[-1].node, \ | ||
| 990 | tnode); \ | ||
| 991 | } \ | ||
| 992 | a_prefix##summarize_swapped_range(path, &pathp[-1], \ | ||
| 993 | swap_loc); \ | ||
| 994 | return; \ | ||
| 995 | } else { \ | ||
| 996 | a_type *right = rbtn_right_get(a_type, a_field, \ | ||
| 997 | pathp->node); \ | ||
| 998 | a_type *rightleft = rbtn_left_get(a_type, a_field, \ | ||
| 999 | right); \ | ||
| 1000 | if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ | ||
| 1001 | rightleft)) { \ | ||
| 1002 | /* || */\ | ||
| 1003 | /* pathp(b) */\ | ||
| 1004 | /* // \ */\ | ||
| 1005 | /* (b) (b) */\ | ||
| 1006 | /* / */\ | ||
| 1007 | /* (r) */\ | ||
| 1008 | a_type *tnode; \ | ||
| 1009 | rbtn_black_set(a_type, a_field, rightleft); \ | ||
| 1010 | rbtn_rotate_right(a_type, a_field, right, tnode); \ | ||
| 1011 | rbtn_right_set(a_type, a_field, pathp->node, tnode);\ | ||
| 1012 | rbtn_rotate_left(a_type, a_field, pathp->node, \ | ||
| 1013 | tnode); \ | ||
| 1014 | (void)a_summarize(pathp->node, \ | ||
| 1015 | rbtn_left_get(a_type, a_field, pathp->node), \ | ||
| 1016 | rbtn_right_get(a_type, a_field, pathp->node)); \ | ||
| 1017 | (void)a_summarize(right, \ | ||
| 1018 | rbtn_left_get(a_type, a_field, right), \ | ||
| 1019 | rbtn_right_get(a_type, a_field, right)); \ | ||
| 1020 | (void)a_summarize(tnode, \ | ||
| 1021 | rbtn_left_get(a_type, a_field, tnode), \ | ||
| 1022 | rbtn_right_get(a_type, a_field, tnode)); \ | ||
| 1023 | /* Balance restored, but rotation modified */\ | ||
| 1024 | /* subtree root, which may actually be the tree */\ | ||
| 1025 | /* root. */\ | ||
| 1026 | if (pathp == path) { \ | ||
| 1027 | /* Set root. */ \ | ||
| 1028 | rbtree->rbt_root = tnode; \ | ||
| 1029 | } else { \ | ||
| 1030 | if (pathp[-1].cmp < 0) { \ | ||
| 1031 | rbtn_left_set(a_type, a_field, \ | ||
| 1032 | pathp[-1].node, tnode); \ | ||
| 1033 | } else { \ | ||
| 1034 | rbtn_right_set(a_type, a_field, \ | ||
| 1035 | pathp[-1].node, tnode); \ | ||
| 1036 | } \ | ||
| 1037 | a_prefix##summarize_swapped_range(path, \ | ||
| 1038 | &pathp[-1], swap_loc); \ | ||
| 1039 | } \ | ||
| 1040 | return; \ | ||
| 1041 | } else { \ | ||
| 1042 | /* || */\ | ||
| 1043 | /* pathp(b) */\ | ||
| 1044 | /* // \ */\ | ||
| 1045 | /* (b) (b) */\ | ||
| 1046 | /* / */\ | ||
| 1047 | /* (b) */\ | ||
| 1048 | a_type *tnode; \ | ||
| 1049 | rbtn_red_set(a_type, a_field, pathp->node); \ | ||
| 1050 | rbtn_rotate_left(a_type, a_field, pathp->node, \ | ||
| 1051 | tnode); \ | ||
| 1052 | (void)a_summarize(pathp->node, \ | ||
| 1053 | rbtn_left_get(a_type, a_field, pathp->node), \ | ||
| 1054 | rbtn_right_get(a_type, a_field, pathp->node)); \ | ||
| 1055 | (void)a_summarize(tnode, \ | ||
| 1056 | rbtn_left_get(a_type, a_field, tnode), \ | ||
| 1057 | rbtn_right_get(a_type, a_field, tnode)); \ | ||
| 1058 | pathp->node = tnode; \ | ||
| 1059 | } \ | ||
| 1060 | } \ | ||
| 1061 | } else { \ | ||
| 1062 | a_type *left; \ | ||
| 1063 | rbtn_right_set(a_type, a_field, pathp->node, \ | ||
| 1064 | pathp[1].node); \ | ||
| 1065 | left = rbtn_left_get(a_type, a_field, pathp->node); \ | ||
| 1066 | if (rbtn_red_get(a_type, a_field, left)) { \ | ||
| 1067 | a_type *tnode; \ | ||
| 1068 | a_type *leftright = rbtn_right_get(a_type, a_field, \ | ||
| 1069 | left); \ | ||
| 1070 | a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ | ||
| 1071 | leftright); \ | ||
| 1072 | if (leftrightleft != NULL && rbtn_red_get(a_type, \ | ||
| 1073 | a_field, leftrightleft)) { \ | ||
| 1074 | /* || */\ | ||
| 1075 | /* pathp(b) */\ | ||
| 1076 | /* / \\ */\ | ||
| 1077 | /* (r) (b) */\ | ||
| 1078 | /* \ */\ | ||
| 1079 | /* (b) */\ | ||
| 1080 | /* / */\ | ||
| 1081 | /* (r) */\ | ||
| 1082 | a_type *unode; \ | ||
| 1083 | rbtn_black_set(a_type, a_field, leftrightleft); \ | ||
| 1084 | rbtn_rotate_right(a_type, a_field, pathp->node, \ | ||
| 1085 | unode); \ | ||
| 1086 | rbtn_rotate_right(a_type, a_field, pathp->node, \ | ||
| 1087 | tnode); \ | ||
| 1088 | rbtn_right_set(a_type, a_field, unode, tnode); \ | ||
| 1089 | rbtn_rotate_left(a_type, a_field, unode, tnode); \ | ||
| 1090 | (void)a_summarize(pathp->node, \ | ||
| 1091 | rbtn_left_get(a_type, a_field, pathp->node), \ | ||
| 1092 | rbtn_right_get(a_type, a_field, pathp->node)); \ | ||
| 1093 | (void)a_summarize(unode, \ | ||
| 1094 | rbtn_left_get(a_type, a_field, unode), \ | ||
| 1095 | rbtn_right_get(a_type, a_field, unode)); \ | ||
| 1096 | } else { \ | ||
| 1097 | /* || */\ | ||
| 1098 | /* pathp(b) */\ | ||
| 1099 | /* / \\ */\ | ||
| 1100 | /* (r) (b) */\ | ||
| 1101 | /* \ */\ | ||
| 1102 | /* (b) */\ | ||
| 1103 | /* / */\ | ||
| 1104 | /* (b) */\ | ||
| 1105 | assert(leftright != NULL); \ | ||
| 1106 | rbtn_red_set(a_type, a_field, leftright); \ | ||
| 1107 | rbtn_rotate_right(a_type, a_field, pathp->node, \ | ||
| 1108 | tnode); \ | ||
| 1109 | rbtn_black_set(a_type, a_field, tnode); \ | ||
| 1110 | (void)a_summarize(pathp->node, \ | ||
| 1111 | rbtn_left_get(a_type, a_field, pathp->node), \ | ||
| 1112 | rbtn_right_get(a_type, a_field, pathp->node)); \ | ||
| 1113 | } \ | ||
| 1114 | (void)a_summarize(tnode, \ | ||
| 1115 | rbtn_left_get(a_type, a_field, tnode), \ | ||
| 1116 | rbtn_right_get(a_type, a_field, tnode)); \ | ||
| 1117 | /* Balance restored, but rotation modified subtree */\ | ||
| 1118 | /* root, which may actually be the tree root. */\ | ||
| 1119 | if (pathp == path) { \ | ||
| 1120 | /* Set root. */ \ | ||
| 1121 | rbtree->rbt_root = tnode; \ | ||
| 1122 | } else { \ | ||
| 1123 | if (pathp[-1].cmp < 0) { \ | ||
| 1124 | rbtn_left_set(a_type, a_field, pathp[-1].node, \ | ||
| 1125 | tnode); \ | ||
| 1126 | } else { \ | ||
| 1127 | rbtn_right_set(a_type, a_field, pathp[-1].node, \ | ||
| 1128 | tnode); \ | ||
| 1129 | } \ | ||
| 1130 | a_prefix##summarize_swapped_range(path, &pathp[-1], \ | ||
| 1131 | swap_loc); \ | ||
| 1132 | } \ | ||
| 1133 | return; \ | ||
| 1134 | } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ | ||
| 1135 | a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ | ||
| 1136 | if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ | ||
| 1137 | leftleft)) { \ | ||
| 1138 | /* || */\ | ||
| 1139 | /* pathp(r) */\ | ||
| 1140 | /* / \\ */\ | ||
| 1141 | /* (b) (b) */\ | ||
| 1142 | /* / */\ | ||
| 1143 | /* (r) */\ | ||
| 1144 | a_type *tnode; \ | ||
| 1145 | rbtn_black_set(a_type, a_field, pathp->node); \ | ||
| 1146 | rbtn_red_set(a_type, a_field, left); \ | ||
| 1147 | rbtn_black_set(a_type, a_field, leftleft); \ | ||
| 1148 | rbtn_rotate_right(a_type, a_field, pathp->node, \ | ||
| 1149 | tnode); \ | ||
| 1150 | (void)a_summarize(pathp->node, \ | ||
| 1151 | rbtn_left_get(a_type, a_field, pathp->node), \ | ||
| 1152 | rbtn_right_get(a_type, a_field, pathp->node)); \ | ||
| 1153 | (void)a_summarize(tnode, \ | ||
| 1154 | rbtn_left_get(a_type, a_field, tnode), \ | ||
| 1155 | rbtn_right_get(a_type, a_field, tnode)); \ | ||
| 1156 | /* Balance restored, but rotation modified */\ | ||
| 1157 | /* subtree root. */\ | ||
| 1158 | assert((uintptr_t)pathp > (uintptr_t)path); \ | ||
| 1159 | if (pathp[-1].cmp < 0) { \ | ||
| 1160 | rbtn_left_set(a_type, a_field, pathp[-1].node, \ | ||
| 1161 | tnode); \ | ||
| 1162 | } else { \ | ||
| 1163 | rbtn_right_set(a_type, a_field, pathp[-1].node, \ | ||
| 1164 | tnode); \ | ||
| 1165 | } \ | ||
| 1166 | a_prefix##summarize_swapped_range(path, &pathp[-1], \ | ||
| 1167 | swap_loc); \ | ||
| 1168 | return; \ | ||
| 1169 | } else { \ | ||
| 1170 | /* || */\ | ||
| 1171 | /* pathp(r) */\ | ||
| 1172 | /* / \\ */\ | ||
| 1173 | /* (b) (b) */\ | ||
| 1174 | /* / */\ | ||
| 1175 | /* (b) */\ | ||
| 1176 | rbtn_red_set(a_type, a_field, left); \ | ||
| 1177 | rbtn_black_set(a_type, a_field, pathp->node); \ | ||
| 1178 | /* Balance restored. */ \ | ||
| 1179 | a_prefix##summarize_swapped_range(path, pathp, \ | ||
| 1180 | swap_loc); \ | ||
| 1181 | return; \ | ||
| 1182 | } \ | ||
| 1183 | } else { \ | ||
| 1184 | a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ | ||
| 1185 | if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ | ||
| 1186 | leftleft)) { \ | ||
| 1187 | /* || */\ | ||
| 1188 | /* pathp(b) */\ | ||
| 1189 | /* / \\ */\ | ||
| 1190 | /* (b) (b) */\ | ||
| 1191 | /* / */\ | ||
| 1192 | /* (r) */\ | ||
| 1193 | a_type *tnode; \ | ||
| 1194 | rbtn_black_set(a_type, a_field, leftleft); \ | ||
| 1195 | rbtn_rotate_right(a_type, a_field, pathp->node, \ | ||
| 1196 | tnode); \ | ||
| 1197 | (void)a_summarize(pathp->node, \ | ||
| 1198 | rbtn_left_get(a_type, a_field, pathp->node), \ | ||
| 1199 | rbtn_right_get(a_type, a_field, pathp->node)); \ | ||
| 1200 | (void)a_summarize(tnode, \ | ||
| 1201 | rbtn_left_get(a_type, a_field, tnode), \ | ||
| 1202 | rbtn_right_get(a_type, a_field, tnode)); \ | ||
| 1203 | /* Balance restored, but rotation modified */\ | ||
| 1204 | /* subtree root, which may actually be the tree */\ | ||
| 1205 | /* root. */\ | ||
| 1206 | if (pathp == path) { \ | ||
| 1207 | /* Set root. */ \ | ||
| 1208 | rbtree->rbt_root = tnode; \ | ||
| 1209 | } else { \ | ||
| 1210 | if (pathp[-1].cmp < 0) { \ | ||
| 1211 | rbtn_left_set(a_type, a_field, \ | ||
| 1212 | pathp[-1].node, tnode); \ | ||
| 1213 | } else { \ | ||
| 1214 | rbtn_right_set(a_type, a_field, \ | ||
| 1215 | pathp[-1].node, tnode); \ | ||
| 1216 | } \ | ||
| 1217 | a_prefix##summarize_swapped_range(path, \ | ||
| 1218 | &pathp[-1], swap_loc); \ | ||
| 1219 | } \ | ||
| 1220 | return; \ | ||
| 1221 | } else { \ | ||
| 1222 | /* || */\ | ||
| 1223 | /* pathp(b) */\ | ||
| 1224 | /* / \\ */\ | ||
| 1225 | /* (b) (b) */\ | ||
| 1226 | /* / */\ | ||
| 1227 | /* (b) */\ | ||
| 1228 | rbtn_red_set(a_type, a_field, left); \ | ||
| 1229 | (void)a_summarize(pathp->node, \ | ||
| 1230 | rbtn_left_get(a_type, a_field, pathp->node), \ | ||
| 1231 | rbtn_right_get(a_type, a_field, pathp->node)); \ | ||
| 1232 | } \ | ||
| 1233 | } \ | ||
| 1234 | } \ | ||
| 1235 | } \ | ||
| 1236 | /* Set root. */ \ | ||
| 1237 | rbtree->rbt_root = path->node; \ | ||
| 1238 | assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \ | ||
| 1239 | } \ | ||
| 1240 | a_attr a_type * \ | ||
| 1241 | a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ | ||
| 1242 | a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ | ||
| 1243 | if (node == NULL) { \ | ||
| 1244 | return NULL; \ | ||
| 1245 | } else { \ | ||
| 1246 | a_type *ret; \ | ||
| 1247 | if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ | ||
| 1248 | a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \ | ||
| 1249 | arg)) != NULL) { \ | ||
| 1250 | return ret; \ | ||
| 1251 | } \ | ||
| 1252 | return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ | ||
| 1253 | a_field, node), cb, arg); \ | ||
| 1254 | } \ | ||
| 1255 | } \ | ||
| 1256 | a_attr a_type * \ | ||
| 1257 | a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ | ||
| 1258 | a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ | ||
| 1259 | int cmp = a_cmp(start, node); \ | ||
| 1260 | if (cmp < 0) { \ | ||
| 1261 | a_type *ret; \ | ||
| 1262 | if ((ret = a_prefix##iter_start(rbtree, start, \ | ||
| 1263 | rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \ | ||
| 1264 | (ret = cb(rbtree, node, arg)) != NULL) { \ | ||
| 1265 | return ret; \ | ||
| 1266 | } \ | ||
| 1267 | return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ | ||
| 1268 | a_field, node), cb, arg); \ | ||
| 1269 | } else if (cmp > 0) { \ | ||
| 1270 | return a_prefix##iter_start(rbtree, start, \ | ||
| 1271 | rbtn_right_get(a_type, a_field, node), cb, arg); \ | ||
| 1272 | } else { \ | ||
| 1273 | a_type *ret; \ | ||
| 1274 | if ((ret = cb(rbtree, node, arg)) != NULL) { \ | ||
| 1275 | return ret; \ | ||
| 1276 | } \ | ||
| 1277 | return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ | ||
| 1278 | a_field, node), cb, arg); \ | ||
| 1279 | } \ | ||
| 1280 | } \ | ||
| 1281 | a_attr a_type * \ | ||
| 1282 | a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ | ||
| 1283 | a_rbt_type *, a_type *, void *), void *arg) { \ | ||
| 1284 | a_type *ret; \ | ||
| 1285 | if (start != NULL) { \ | ||
| 1286 | ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ | ||
| 1287 | cb, arg); \ | ||
| 1288 | } else { \ | ||
| 1289 | ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ | ||
| 1290 | } \ | ||
| 1291 | return ret; \ | ||
| 1292 | } \ | ||
| 1293 | a_attr a_type * \ | ||
| 1294 | a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ | ||
| 1295 | a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ | ||
| 1296 | if (node == NULL) { \ | ||
| 1297 | return NULL; \ | ||
| 1298 | } else { \ | ||
| 1299 | a_type *ret; \ | ||
| 1300 | if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ | ||
| 1301 | rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ | ||
| 1302 | (ret = cb(rbtree, node, arg)) != NULL) { \ | ||
| 1303 | return ret; \ | ||
| 1304 | } \ | ||
| 1305 | return a_prefix##reverse_iter_recurse(rbtree, \ | ||
| 1306 | rbtn_left_get(a_type, a_field, node), cb, arg); \ | ||
| 1307 | } \ | ||
| 1308 | } \ | ||
| 1309 | a_attr a_type * \ | ||
| 1310 | a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ | ||
| 1311 | a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ | ||
| 1312 | void *arg) { \ | ||
| 1313 | int cmp = a_cmp(start, node); \ | ||
| 1314 | if (cmp > 0) { \ | ||
| 1315 | a_type *ret; \ | ||
| 1316 | if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ | ||
| 1317 | rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ | ||
| 1318 | (ret = cb(rbtree, node, arg)) != NULL) { \ | ||
| 1319 | return ret; \ | ||
| 1320 | } \ | ||
| 1321 | return a_prefix##reverse_iter_recurse(rbtree, \ | ||
| 1322 | rbtn_left_get(a_type, a_field, node), cb, arg); \ | ||
| 1323 | } else if (cmp < 0) { \ | ||
| 1324 | return a_prefix##reverse_iter_start(rbtree, start, \ | ||
| 1325 | rbtn_left_get(a_type, a_field, node), cb, arg); \ | ||
| 1326 | } else { \ | ||
| 1327 | a_type *ret; \ | ||
| 1328 | if ((ret = cb(rbtree, node, arg)) != NULL) { \ | ||
| 1329 | return ret; \ | ||
| 1330 | } \ | ||
| 1331 | return a_prefix##reverse_iter_recurse(rbtree, \ | ||
| 1332 | rbtn_left_get(a_type, a_field, node), cb, arg); \ | ||
| 1333 | } \ | ||
| 1334 | } \ | ||
| 1335 | a_attr a_type * \ | ||
| 1336 | a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ | ||
| 1337 | a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ | ||
| 1338 | a_type *ret; \ | ||
| 1339 | if (start != NULL) { \ | ||
| 1340 | ret = a_prefix##reverse_iter_start(rbtree, start, \ | ||
| 1341 | rbtree->rbt_root, cb, arg); \ | ||
| 1342 | } else { \ | ||
| 1343 | ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ | ||
| 1344 | cb, arg); \ | ||
| 1345 | } \ | ||
| 1346 | return ret; \ | ||
| 1347 | } \ | ||
| 1348 | a_attr void \ | ||
| 1349 | a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \ | ||
| 1350 | a_type *, void *), void *arg) { \ | ||
| 1351 | if (node == NULL) { \ | ||
| 1352 | return; \ | ||
| 1353 | } \ | ||
| 1354 | a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \ | ||
| 1355 | node), cb, arg); \ | ||
| 1356 | rbtn_left_set(a_type, a_field, (node), NULL); \ | ||
| 1357 | a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \ | ||
| 1358 | node), cb, arg); \ | ||
| 1359 | rbtn_right_set(a_type, a_field, (node), NULL); \ | ||
| 1360 | if (cb) { \ | ||
| 1361 | cb(node, arg); \ | ||
| 1362 | } \ | ||
| 1363 | } \ | ||
| 1364 | a_attr void \ | ||
| 1365 | a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ | ||
| 1366 | void *arg) { \ | ||
| 1367 | a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \ | ||
| 1368 | rbtree->rbt_root = NULL; \ | ||
| 1369 | } \ | ||
| 1370 | /* BEGIN SUMMARIZED-ONLY IMPLEMENTATION */ \ | ||
| 1371 | rb_summarized_only_##a_is_summarized( \ | ||
| 1372 | static inline a_prefix##path_entry_t * \ | ||
| 1373 | a_prefix##wind(a_rbt_type *rbtree, \ | ||
| 1374 | a_prefix##path_entry_t path[RB_MAX_DEPTH], a_type *node) { \ | ||
| 1375 | a_prefix##path_entry_t *pathp; \ | ||
| 1376 | path->node = rbtree->rbt_root; \ | ||
| 1377 | for (pathp = path; ; pathp++) { \ | ||
| 1378 | assert((size_t)(pathp - path) < RB_MAX_DEPTH); \ | ||
| 1379 | pathp->cmp = a_cmp(node, pathp->node); \ | ||
| 1380 | if (pathp->cmp < 0) { \ | ||
| 1381 | pathp[1].node = rbtn_left_get(a_type, a_field, \ | ||
| 1382 | pathp->node); \ | ||
| 1383 | } else if (pathp->cmp == 0) { \ | ||
| 1384 | return pathp; \ | ||
| 1385 | } else { \ | ||
| 1386 | pathp[1].node = rbtn_right_get(a_type, a_field, \ | ||
| 1387 | pathp->node); \ | ||
| 1388 | } \ | ||
| 1389 | } \ | ||
| 1390 | unreachable(); \ | ||
| 1391 | } \ | ||
| 1392 | a_attr void \ | ||
| 1393 | a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node) { \ | ||
| 1394 | a_prefix##path_entry_t path[RB_MAX_DEPTH]; \ | ||
| 1395 | a_prefix##path_entry_t *pathp = a_prefix##wind(rbtree, path, node); \ | ||
| 1396 | a_prefix##summarize_range(path, pathp); \ | ||
| 1397 | } \ | ||
| 1398 | a_attr bool \ | ||
| 1399 | a_prefix##empty_filtered(a_rbt_type *rbtree, \ | ||
| 1400 | bool (*filter_node)(void *, a_type *), \ | ||
| 1401 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1402 | void *filter_ctx) { \ | ||
| 1403 | a_type *node = rbtree->rbt_root; \ | ||
| 1404 | return node == NULL || !filter_subtree(filter_ctx, node); \ | ||
| 1405 | } \ | ||
| 1406 | static inline a_type * \ | ||
| 1407 | a_prefix##first_filtered_from_node(a_type *node, \ | ||
| 1408 | bool (*filter_node)(void *, a_type *), \ | ||
| 1409 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1410 | void *filter_ctx) { \ | ||
| 1411 | assert(node != NULL && filter_subtree(filter_ctx, node)); \ | ||
| 1412 | while (true) { \ | ||
| 1413 | a_type *left = rbtn_left_get(a_type, a_field, node); \ | ||
| 1414 | a_type *right = rbtn_right_get(a_type, a_field, node); \ | ||
| 1415 | if (left != NULL && filter_subtree(filter_ctx, left)) { \ | ||
| 1416 | node = left; \ | ||
| 1417 | } else if (filter_node(filter_ctx, node)) { \ | ||
| 1418 | return node; \ | ||
| 1419 | } else { \ | ||
| 1420 | assert(right != NULL \ | ||
| 1421 | && filter_subtree(filter_ctx, right)); \ | ||
| 1422 | node = right; \ | ||
| 1423 | } \ | ||
| 1424 | } \ | ||
| 1425 | unreachable(); \ | ||
| 1426 | } \ | ||
| 1427 | a_attr a_type * \ | ||
| 1428 | a_prefix##first_filtered(a_rbt_type *rbtree, \ | ||
| 1429 | bool (*filter_node)(void *, a_type *), \ | ||
| 1430 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1431 | void *filter_ctx) { \ | ||
| 1432 | a_type *node = rbtree->rbt_root; \ | ||
| 1433 | if (node == NULL || !filter_subtree(filter_ctx, node)) { \ | ||
| 1434 | return NULL; \ | ||
| 1435 | } \ | ||
| 1436 | return a_prefix##first_filtered_from_node(node, filter_node, \ | ||
| 1437 | filter_subtree, filter_ctx); \ | ||
| 1438 | } \ | ||
| 1439 | static inline a_type * \ | ||
| 1440 | a_prefix##last_filtered_from_node(a_type *node, \ | ||
| 1441 | bool (*filter_node)(void *, a_type *), \ | ||
| 1442 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1443 | void *filter_ctx) { \ | ||
| 1444 | assert(node != NULL && filter_subtree(filter_ctx, node)); \ | ||
| 1445 | while (true) { \ | ||
| 1446 | a_type *left = rbtn_left_get(a_type, a_field, node); \ | ||
| 1447 | a_type *right = rbtn_right_get(a_type, a_field, node); \ | ||
| 1448 | if (right != NULL && filter_subtree(filter_ctx, right)) { \ | ||
| 1449 | node = right; \ | ||
| 1450 | } else if (filter_node(filter_ctx, node)) { \ | ||
| 1451 | return node; \ | ||
| 1452 | } else { \ | ||
| 1453 | assert(left != NULL \ | ||
| 1454 | && filter_subtree(filter_ctx, left)); \ | ||
| 1455 | node = left; \ | ||
| 1456 | } \ | ||
| 1457 | } \ | ||
| 1458 | unreachable(); \ | ||
| 1459 | } \ | ||
| 1460 | a_attr a_type * \ | ||
| 1461 | a_prefix##last_filtered(a_rbt_type *rbtree, \ | ||
| 1462 | bool (*filter_node)(void *, a_type *), \ | ||
| 1463 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1464 | void *filter_ctx) { \ | ||
| 1465 | a_type *node = rbtree->rbt_root; \ | ||
| 1466 | if (node == NULL || !filter_subtree(filter_ctx, node)) { \ | ||
| 1467 | return NULL; \ | ||
| 1468 | } \ | ||
| 1469 | return a_prefix##last_filtered_from_node(node, filter_node, \ | ||
| 1470 | filter_subtree, filter_ctx); \ | ||
| 1471 | } \ | ||
| 1472 | /* Internal implementation function. Search for a node comparing */\ | ||
| 1473 | /* equal to key and matching the filter. If such a node is in the tree, */\ | ||
| 1474 | /* return it. Additionally, the caller has the option to ask for */\ | ||
| 1475 | /* bounds on the next / prev node in the tree passing the filter. */\ | ||
| 1476 | /* If nextbound is true, then this function will do one of the */\ | ||
| 1477 | /* following: */\ | ||
| 1478 | /* - Fill in *nextbound_node with the smallest node in the tree */\ | ||
| 1479 | /* greater than key passing the filter, and NULL-out */\ | ||
| 1480 | /* *nextbound_subtree. */\ | ||
| 1481 | /* - Fill in *nextbound_subtree with a parent of that node which is */\ | ||
| 1482 | /* not a parent of the searched-for node, and NULL-out */\ | ||
| 1483 | /* *nextbound_node. */\ | ||
| 1484 | /* - NULL-out both *nextbound_node and *nextbound_subtree, in which */\ | ||
| 1485 | /* case no node greater than key but passing the filter is in the */\ | ||
| 1486 | /* tree. */\ | ||
| 1487 | /* The prevbound case is similar. If the caller knows that key is in */\ | ||
| 1488 | /* the tree and that the subtree rooted at key does not contain a */\ | ||
| 1489 | /* node satisfying the bound being searched for, then they can pass */\ | ||
| 1490 | /* false for include_subtree, in which case we won't bother searching */\ | ||
| 1491 | /* there (risking a cache miss). */\ | ||
| 1492 | /* */\ | ||
| 1493 | /* This API is unfortunately complex, but the logic for filtered */\ | ||
| 1494 | /* searches is very subtle, and otherwise we would have to repeat it */\ | ||
| 1495 | /* multiple times for filtered search, nsearch, psearch, next, and */\ | ||
| 1496 | /* prev. */\ | ||
| 1497 | static inline a_type * \ | ||
| 1498 | a_prefix##search_with_filter_bounds(a_rbt_type *rbtree, \ | ||
| 1499 | const a_type *key, \ | ||
| 1500 | bool (*filter_node)(void *, a_type *), \ | ||
| 1501 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1502 | void *filter_ctx, \ | ||
| 1503 | bool include_subtree, \ | ||
| 1504 | bool nextbound, a_type **nextbound_node, a_type **nextbound_subtree, \ | ||
| 1505 | bool prevbound, a_type **prevbound_node, a_type **prevbound_subtree) {\ | ||
| 1506 | if (nextbound) { \ | ||
| 1507 | *nextbound_node = NULL; \ | ||
| 1508 | *nextbound_subtree = NULL; \ | ||
| 1509 | } \ | ||
| 1510 | if (prevbound) { \ | ||
| 1511 | *prevbound_node = NULL; \ | ||
| 1512 | *prevbound_subtree = NULL; \ | ||
| 1513 | } \ | ||
| 1514 | a_type *tnode = rbtree->rbt_root; \ | ||
| 1515 | while (tnode != NULL && filter_subtree(filter_ctx, tnode)) { \ | ||
| 1516 | int cmp = a_cmp(key, tnode); \ | ||
| 1517 | a_type *tleft = rbtn_left_get(a_type, a_field, tnode); \ | ||
| 1518 | a_type *tright = rbtn_right_get(a_type, a_field, tnode); \ | ||
| 1519 | if (cmp < 0) { \ | ||
| 1520 | if (nextbound) { \ | ||
| 1521 | if (filter_node(filter_ctx, tnode)) { \ | ||
| 1522 | *nextbound_node = tnode; \ | ||
| 1523 | *nextbound_subtree = NULL; \ | ||
| 1524 | } else if (tright != NULL && filter_subtree( \ | ||
| 1525 | filter_ctx, tright)) { \ | ||
| 1526 | *nextbound_node = NULL; \ | ||
| 1527 | *nextbound_subtree = tright; \ | ||
| 1528 | } \ | ||
| 1529 | } \ | ||
| 1530 | tnode = tleft; \ | ||
| 1531 | } else if (cmp > 0) { \ | ||
| 1532 | if (prevbound) { \ | ||
| 1533 | if (filter_node(filter_ctx, tnode)) { \ | ||
| 1534 | *prevbound_node = tnode; \ | ||
| 1535 | *prevbound_subtree = NULL; \ | ||
| 1536 | } else if (tleft != NULL && filter_subtree( \ | ||
| 1537 | filter_ctx, tleft)) { \ | ||
| 1538 | *prevbound_node = NULL; \ | ||
| 1539 | *prevbound_subtree = tleft; \ | ||
| 1540 | } \ | ||
| 1541 | } \ | ||
| 1542 | tnode = tright; \ | ||
| 1543 | } else { \ | ||
| 1544 | if (filter_node(filter_ctx, tnode)) { \ | ||
| 1545 | return tnode; \ | ||
| 1546 | } \ | ||
| 1547 | if (include_subtree) { \ | ||
| 1548 | if (prevbound && tleft != NULL && filter_subtree( \ | ||
| 1549 | filter_ctx, tleft)) { \ | ||
| 1550 | *prevbound_node = NULL; \ | ||
| 1551 | *prevbound_subtree = tleft; \ | ||
| 1552 | } \ | ||
| 1553 | if (nextbound && tright != NULL && filter_subtree( \ | ||
| 1554 | filter_ctx, tright)) { \ | ||
| 1555 | *nextbound_node = NULL; \ | ||
| 1556 | *nextbound_subtree = tright; \ | ||
| 1557 | } \ | ||
| 1558 | } \ | ||
| 1559 | return NULL; \ | ||
| 1560 | } \ | ||
| 1561 | } \ | ||
| 1562 | return NULL; \ | ||
| 1563 | } \ | ||
| 1564 | a_attr a_type * \ | ||
| 1565 | a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node, \ | ||
| 1566 | bool (*filter_node)(void *, a_type *), \ | ||
| 1567 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1568 | void *filter_ctx) { \ | ||
| 1569 | a_type *nright = rbtn_right_get(a_type, a_field, node); \ | ||
| 1570 | if (nright != NULL && filter_subtree(filter_ctx, nright)) { \ | ||
| 1571 | return a_prefix##first_filtered_from_node(nright, filter_node, \ | ||
| 1572 | filter_subtree, filter_ctx); \ | ||
| 1573 | } \ | ||
| 1574 | a_type *node_candidate; \ | ||
| 1575 | a_type *subtree_candidate; \ | ||
| 1576 | a_type *search_result = a_prefix##search_with_filter_bounds( \ | ||
| 1577 | rbtree, node, filter_node, filter_subtree, filter_ctx, \ | ||
| 1578 | /* include_subtree */ false, \ | ||
| 1579 | /* nextbound */ true, &node_candidate, &subtree_candidate, \ | ||
| 1580 | /* prevbound */ false, NULL, NULL); \ | ||
| 1581 | assert(node == search_result \ | ||
| 1582 | || !filter_node(filter_ctx, node)); \ | ||
| 1583 | if (node_candidate != NULL) { \ | ||
| 1584 | return node_candidate; \ | ||
| 1585 | } \ | ||
| 1586 | if (subtree_candidate != NULL) { \ | ||
| 1587 | return a_prefix##first_filtered_from_node( \ | ||
| 1588 | subtree_candidate, filter_node, filter_subtree, \ | ||
| 1589 | filter_ctx); \ | ||
| 1590 | } \ | ||
| 1591 | return NULL; \ | ||
| 1592 | } \ | ||
| 1593 | a_attr a_type * \ | ||
| 1594 | a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node, \ | ||
| 1595 | bool (*filter_node)(void *, a_type *), \ | ||
| 1596 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1597 | void *filter_ctx) { \ | ||
| 1598 | a_type *nleft = rbtn_left_get(a_type, a_field, node); \ | ||
| 1599 | if (nleft != NULL && filter_subtree(filter_ctx, nleft)) { \ | ||
| 1600 | return a_prefix##last_filtered_from_node(nleft, filter_node, \ | ||
| 1601 | filter_subtree, filter_ctx); \ | ||
| 1602 | } \ | ||
| 1603 | a_type *node_candidate; \ | ||
| 1604 | a_type *subtree_candidate; \ | ||
| 1605 | a_type *search_result = a_prefix##search_with_filter_bounds( \ | ||
| 1606 | rbtree, node, filter_node, filter_subtree, filter_ctx, \ | ||
| 1607 | /* include_subtree */ false, \ | ||
| 1608 | /* nextbound */ false, NULL, NULL, \ | ||
| 1609 | /* prevbound */ true, &node_candidate, &subtree_candidate); \ | ||
| 1610 | assert(node == search_result \ | ||
| 1611 | || !filter_node(filter_ctx, node)); \ | ||
| 1612 | if (node_candidate != NULL) { \ | ||
| 1613 | return node_candidate; \ | ||
| 1614 | } \ | ||
| 1615 | if (subtree_candidate != NULL) { \ | ||
| 1616 | return a_prefix##last_filtered_from_node( \ | ||
| 1617 | subtree_candidate, filter_node, filter_subtree, \ | ||
| 1618 | filter_ctx); \ | ||
| 1619 | } \ | ||
| 1620 | return NULL; \ | ||
| 1621 | } \ | ||
| 1622 | a_attr a_type * \ | ||
| 1623 | a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key, \ | ||
| 1624 | bool (*filter_node)(void *, a_type *), \ | ||
| 1625 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1626 | void *filter_ctx) { \ | ||
| 1627 | a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \ | ||
| 1628 | filter_node, filter_subtree, filter_ctx, \ | ||
| 1629 | /* include_subtree */ false, \ | ||
| 1630 | /* nextbound */ false, NULL, NULL, \ | ||
| 1631 | /* prevbound */ false, NULL, NULL); \ | ||
| 1632 | return result; \ | ||
| 1633 | } \ | ||
| 1634 | a_attr a_type * \ | ||
| 1635 | a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key, \ | ||
| 1636 | bool (*filter_node)(void *, a_type *), \ | ||
| 1637 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1638 | void *filter_ctx) { \ | ||
| 1639 | a_type *node_candidate; \ | ||
| 1640 | a_type *subtree_candidate; \ | ||
| 1641 | a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \ | ||
| 1642 | filter_node, filter_subtree, filter_ctx, \ | ||
| 1643 | /* include_subtree */ true, \ | ||
| 1644 | /* nextbound */ true, &node_candidate, &subtree_candidate, \ | ||
| 1645 | /* prevbound */ false, NULL, NULL); \ | ||
| 1646 | if (result != NULL) { \ | ||
| 1647 | return result; \ | ||
| 1648 | } \ | ||
| 1649 | if (node_candidate != NULL) { \ | ||
| 1650 | return node_candidate; \ | ||
| 1651 | } \ | ||
| 1652 | if (subtree_candidate != NULL) { \ | ||
| 1653 | return a_prefix##first_filtered_from_node( \ | ||
| 1654 | subtree_candidate, filter_node, filter_subtree, \ | ||
| 1655 | filter_ctx); \ | ||
| 1656 | } \ | ||
| 1657 | return NULL; \ | ||
| 1658 | } \ | ||
| 1659 | a_attr a_type * \ | ||
| 1660 | a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key, \ | ||
| 1661 | bool (*filter_node)(void *, a_type *), \ | ||
| 1662 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1663 | void *filter_ctx) { \ | ||
| 1664 | a_type *node_candidate; \ | ||
| 1665 | a_type *subtree_candidate; \ | ||
| 1666 | a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \ | ||
| 1667 | filter_node, filter_subtree, filter_ctx, \ | ||
| 1668 | /* include_subtree */ true, \ | ||
| 1669 | /* nextbound */ false, NULL, NULL, \ | ||
| 1670 | /* prevbound */ true, &node_candidate, &subtree_candidate); \ | ||
| 1671 | if (result != NULL) { \ | ||
| 1672 | return result; \ | ||
| 1673 | } \ | ||
| 1674 | if (node_candidate != NULL) { \ | ||
| 1675 | return node_candidate; \ | ||
| 1676 | } \ | ||
| 1677 | if (subtree_candidate != NULL) { \ | ||
| 1678 | return a_prefix##last_filtered_from_node( \ | ||
| 1679 | subtree_candidate, filter_node, filter_subtree, \ | ||
| 1680 | filter_ctx); \ | ||
| 1681 | } \ | ||
| 1682 | return NULL; \ | ||
| 1683 | } \ | ||
| 1684 | a_attr a_type * \ | ||
| 1685 | a_prefix##iter_recurse_filtered(a_rbt_type *rbtree, a_type *node, \ | ||
| 1686 | a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \ | ||
| 1687 | bool (*filter_node)(void *, a_type *), \ | ||
| 1688 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1689 | void *filter_ctx) { \ | ||
| 1690 | if (node == NULL || !filter_subtree(filter_ctx, node)) { \ | ||
| 1691 | return NULL; \ | ||
| 1692 | } \ | ||
| 1693 | a_type *ret; \ | ||
| 1694 | a_type *left = rbtn_left_get(a_type, a_field, node); \ | ||
| 1695 | a_type *right = rbtn_right_get(a_type, a_field, node); \ | ||
| 1696 | ret = a_prefix##iter_recurse_filtered(rbtree, left, cb, arg, \ | ||
| 1697 | filter_node, filter_subtree, filter_ctx); \ | ||
| 1698 | if (ret != NULL) { \ | ||
| 1699 | return ret; \ | ||
| 1700 | } \ | ||
| 1701 | if (filter_node(filter_ctx, node)) { \ | ||
| 1702 | ret = cb(rbtree, node, arg); \ | ||
| 1703 | } \ | ||
| 1704 | if (ret != NULL) { \ | ||
| 1705 | return ret; \ | ||
| 1706 | } \ | ||
| 1707 | return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \ | ||
| 1708 | filter_node, filter_subtree, filter_ctx); \ | ||
| 1709 | } \ | ||
| 1710 | a_attr a_type * \ | ||
| 1711 | a_prefix##iter_start_filtered(a_rbt_type *rbtree, a_type *start, \ | ||
| 1712 | a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ | ||
| 1713 | void *arg, bool (*filter_node)(void *, a_type *), \ | ||
| 1714 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1715 | void *filter_ctx) { \ | ||
| 1716 | if (!filter_subtree(filter_ctx, node)) { \ | ||
| 1717 | return NULL; \ | ||
| 1718 | } \ | ||
| 1719 | int cmp = a_cmp(start, node); \ | ||
| 1720 | a_type *ret; \ | ||
| 1721 | a_type *left = rbtn_left_get(a_type, a_field, node); \ | ||
| 1722 | a_type *right = rbtn_right_get(a_type, a_field, node); \ | ||
| 1723 | if (cmp < 0) { \ | ||
| 1724 | ret = a_prefix##iter_start_filtered(rbtree, start, left, cb, \ | ||
| 1725 | arg, filter_node, filter_subtree, filter_ctx); \ | ||
| 1726 | if (ret != NULL) { \ | ||
| 1727 | return ret; \ | ||
| 1728 | } \ | ||
| 1729 | if (filter_node(filter_ctx, node)) { \ | ||
| 1730 | ret = cb(rbtree, node, arg); \ | ||
| 1731 | if (ret != NULL) { \ | ||
| 1732 | return ret; \ | ||
| 1733 | } \ | ||
| 1734 | } \ | ||
| 1735 | return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \ | ||
| 1736 | filter_node, filter_subtree, filter_ctx); \ | ||
| 1737 | } else if (cmp > 0) { \ | ||
| 1738 | return a_prefix##iter_start_filtered(rbtree, start, right, \ | ||
| 1739 | cb, arg, filter_node, filter_subtree, filter_ctx); \ | ||
| 1740 | } else { \ | ||
| 1741 | if (filter_node(filter_ctx, node)) { \ | ||
| 1742 | ret = cb(rbtree, node, arg); \ | ||
| 1743 | if (ret != NULL) { \ | ||
| 1744 | return ret; \ | ||
| 1745 | } \ | ||
| 1746 | } \ | ||
| 1747 | return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \ | ||
| 1748 | filter_node, filter_subtree, filter_ctx); \ | ||
| 1749 | } \ | ||
| 1750 | } \ | ||
| 1751 | a_attr a_type * \ | ||
| 1752 | a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start, \ | ||
| 1753 | a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \ | ||
| 1754 | bool (*filter_node)(void *, a_type *), \ | ||
| 1755 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1756 | void *filter_ctx) { \ | ||
| 1757 | a_type *ret; \ | ||
| 1758 | if (start != NULL) { \ | ||
| 1759 | ret = a_prefix##iter_start_filtered(rbtree, start, \ | ||
| 1760 | rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \ | ||
| 1761 | filter_ctx); \ | ||
| 1762 | } else { \ | ||
| 1763 | ret = a_prefix##iter_recurse_filtered(rbtree, rbtree->rbt_root, \ | ||
| 1764 | cb, arg, filter_node, filter_subtree, filter_ctx); \ | ||
| 1765 | } \ | ||
| 1766 | return ret; \ | ||
| 1767 | } \ | ||
| 1768 | a_attr a_type * \ | ||
| 1769 | a_prefix##reverse_iter_recurse_filtered(a_rbt_type *rbtree, \ | ||
| 1770 | a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ | ||
| 1771 | void *arg, \ | ||
| 1772 | bool (*filter_node)(void *, a_type *), \ | ||
| 1773 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1774 | void *filter_ctx) { \ | ||
| 1775 | if (node == NULL || !filter_subtree(filter_ctx, node)) { \ | ||
| 1776 | return NULL; \ | ||
| 1777 | } \ | ||
| 1778 | a_type *ret; \ | ||
| 1779 | a_type *left = rbtn_left_get(a_type, a_field, node); \ | ||
| 1780 | a_type *right = rbtn_right_get(a_type, a_field, node); \ | ||
| 1781 | ret = a_prefix##reverse_iter_recurse_filtered(rbtree, right, cb, \ | ||
| 1782 | arg, filter_node, filter_subtree, filter_ctx); \ | ||
| 1783 | if (ret != NULL) { \ | ||
| 1784 | return ret; \ | ||
| 1785 | } \ | ||
| 1786 | if (filter_node(filter_ctx, node)) { \ | ||
| 1787 | ret = cb(rbtree, node, arg); \ | ||
| 1788 | } \ | ||
| 1789 | if (ret != NULL) { \ | ||
| 1790 | return ret; \ | ||
| 1791 | } \ | ||
| 1792 | return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb, \ | ||
| 1793 | arg, filter_node, filter_subtree, filter_ctx); \ | ||
| 1794 | } \ | ||
| 1795 | a_attr a_type * \ | ||
| 1796 | a_prefix##reverse_iter_start_filtered(a_rbt_type *rbtree, a_type *start,\ | ||
| 1797 | a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ | ||
| 1798 | void *arg, bool (*filter_node)(void *, a_type *), \ | ||
| 1799 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1800 | void *filter_ctx) { \ | ||
| 1801 | if (!filter_subtree(filter_ctx, node)) { \ | ||
| 1802 | return NULL; \ | ||
| 1803 | } \ | ||
| 1804 | int cmp = a_cmp(start, node); \ | ||
| 1805 | a_type *ret; \ | ||
| 1806 | a_type *left = rbtn_left_get(a_type, a_field, node); \ | ||
| 1807 | a_type *right = rbtn_right_get(a_type, a_field, node); \ | ||
| 1808 | if (cmp > 0) { \ | ||
| 1809 | ret = a_prefix##reverse_iter_start_filtered(rbtree, start, \ | ||
| 1810 | right, cb, arg, filter_node, filter_subtree, filter_ctx); \ | ||
| 1811 | if (ret != NULL) { \ | ||
| 1812 | return ret; \ | ||
| 1813 | } \ | ||
| 1814 | if (filter_node(filter_ctx, node)) { \ | ||
| 1815 | ret = cb(rbtree, node, arg); \ | ||
| 1816 | if (ret != NULL) { \ | ||
| 1817 | return ret; \ | ||
| 1818 | } \ | ||
| 1819 | } \ | ||
| 1820 | return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\ | ||
| 1821 | arg, filter_node, filter_subtree, filter_ctx); \ | ||
| 1822 | } else if (cmp < 0) { \ | ||
| 1823 | return a_prefix##reverse_iter_start_filtered(rbtree, start, \ | ||
| 1824 | left, cb, arg, filter_node, filter_subtree, filter_ctx); \ | ||
| 1825 | } else { \ | ||
| 1826 | if (filter_node(filter_ctx, node)) { \ | ||
| 1827 | ret = cb(rbtree, node, arg); \ | ||
| 1828 | if (ret != NULL) { \ | ||
| 1829 | return ret; \ | ||
| 1830 | } \ | ||
| 1831 | } \ | ||
| 1832 | return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\ | ||
| 1833 | arg, filter_node, filter_subtree, filter_ctx); \ | ||
| 1834 | } \ | ||
| 1835 | } \ | ||
| 1836 | a_attr a_type * \ | ||
| 1837 | a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \ | ||
| 1838 | a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \ | ||
| 1839 | bool (*filter_node)(void *, a_type *), \ | ||
| 1840 | bool (*filter_subtree)(void *, a_type *), \ | ||
| 1841 | void *filter_ctx) { \ | ||
| 1842 | a_type *ret; \ | ||
| 1843 | if (start != NULL) { \ | ||
| 1844 | ret = a_prefix##reverse_iter_start_filtered(rbtree, start, \ | ||
| 1845 | rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \ | ||
| 1846 | filter_ctx); \ | ||
| 1847 | } else { \ | ||
| 1848 | ret = a_prefix##reverse_iter_recurse_filtered(rbtree, \ | ||
| 1849 | rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \ | ||
| 1850 | filter_ctx); \ | ||
| 1851 | } \ | ||
| 1852 | return ret; \ | ||
| 1853 | } \ | ||
| 1854 | ) /* end rb_summarized_only */ | ||
| 1855 | |||
| 1856 | #endif /* JEMALLOC_INTERNAL_RB_H */ | ||
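The `*_filtered` walkers generated above all rely on the same contract: `filter_subtree` may prune a whole branch, and whenever a subtree passes it, at least one node inside must pass `filter_node`. A minimal standalone model of that contract is sketched below; the node type, the cached `subtree_max` summary, and the threshold filter are invented for illustration and are not the jemalloc API, but the descent loop mirrors `first_filtered_from_node`.

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct node_s node_t;
struct node_s {
	int key;
	int subtree_max;	/* Summary: largest key in this subtree. */
	node_t *left, *right;
};

/* Per-node filter: does this node itself satisfy the query? */
static bool
filter_node(void *ctx, node_t *n) {
	return n->key >= *(int *)ctx;
}

/* Subtree filter: could anything in this subtree satisfy the query? */
static bool
filter_subtree(void *ctx, node_t *n) {
	return n->subtree_max >= *(int *)ctx;
}

/* Same shape as the generated first_filtered_from_node descent. */
static node_t *
first_filtered(node_t *node, void *ctx) {
	if (node == NULL || !filter_subtree(ctx, node)) {
		return NULL;
	}
	while (true) {
		if (node->left != NULL && filter_subtree(ctx, node->left)) {
			node = node->left;
		} else if (filter_node(ctx, node)) {
			return node;
		} else {
			assert(node->right != NULL
			    && filter_subtree(ctx, node->right));
			node = node->right;
		}
	}
}

int
main(void) {
	/* Keys in order: 10, 15, 20, 30, 40. */
	node_t n15 = {15, 15, NULL, NULL}, n40 = {40, 40, NULL, NULL};
	node_t n10 = {10, 15, NULL, &n15}, n30 = {30, 40, NULL, &n40};
	node_t n20 = {20, 40, &n10, &n30};

	int threshold = 25;
	node_t *hit = first_filtered(&n20, &threshold);
	/* Prints 30: the leftmost node whose key passes the filter. */
	printf("first key >= %d: %d\n", threshold, hit == NULL ? -1 : hit->key);
	return 0;
}
```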
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/rtree.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/rtree.h deleted file mode 100644 index a00adb2..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/rtree.h +++ /dev/null | |||
| @@ -1,554 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_RTREE_H | ||
| 2 | #define JEMALLOC_INTERNAL_RTREE_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/atomic.h" | ||
| 5 | #include "jemalloc/internal/mutex.h" | ||
| 6 | #include "jemalloc/internal/rtree_tsd.h" | ||
| 7 | #include "jemalloc/internal/sc.h" | ||
| 8 | #include "jemalloc/internal/tsd.h" | ||
| 9 | |||
| 10 | /* | ||
| 11 | * This radix tree implementation is tailored to the singular purpose of | ||
| 12 | * associating metadata with extents that are currently owned by jemalloc. | ||
| 13 | * | ||
| 14 | ******************************************************************************* | ||
| 15 | */ | ||
| 16 | |||
| 17 | /* Number of high insignificant bits. */ | ||
| 18 | #define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR) | ||
| 19 | /* Number of low insignificant bits. */ | ||
| 20 | #define RTREE_NLIB LG_PAGE | ||
| 21 | /* Number of significant bits. */ | ||
| 22 | #define RTREE_NSB (LG_VADDR - RTREE_NLIB) | ||
| 23 | /* Number of levels in radix tree. */ | ||
| 24 | #if RTREE_NSB <= 10 | ||
| 25 | # define RTREE_HEIGHT 1 | ||
| 26 | #elif RTREE_NSB <= 36 | ||
| 27 | # define RTREE_HEIGHT 2 | ||
| 28 | #elif RTREE_NSB <= 52 | ||
| 29 | # define RTREE_HEIGHT 3 | ||
| 30 | #else | ||
| 31 | # error Unsupported number of significant virtual address bits | ||
| 32 | #endif | ||
| 33 | /* Use compact leaf representation if virtual address encoding allows. */ | ||
| 34 | #if RTREE_NHIB >= LG_CEIL(SC_NSIZES) | ||
| 35 | # define RTREE_LEAF_COMPACT | ||
| 36 | #endif | ||
| 37 | |||
| 38 | typedef struct rtree_node_elm_s rtree_node_elm_t; | ||
| 39 | struct rtree_node_elm_s { | ||
| 40 | atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */ | ||
| 41 | }; | ||
| 42 | |||
| 43 | typedef struct rtree_metadata_s rtree_metadata_t; | ||
| 44 | struct rtree_metadata_s { | ||
| 45 | szind_t szind; | ||
| 46 | extent_state_t state; /* Mirrors edata->state. */ | ||
| 47 | bool is_head; /* Mirrors edata->is_head. */ | ||
| 48 | bool slab; | ||
| 49 | }; | ||
| 50 | |||
| 51 | typedef struct rtree_contents_s rtree_contents_t; | ||
| 52 | struct rtree_contents_s { | ||
| 53 | edata_t *edata; | ||
| 54 | rtree_metadata_t metadata; | ||
| 55 | }; | ||
| 56 | |||
| 57 | #define RTREE_LEAF_STATE_WIDTH EDATA_BITS_STATE_WIDTH | ||
| 58 | #define RTREE_LEAF_STATE_SHIFT 2 | ||
| 59 | #define RTREE_LEAF_STATE_MASK MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT) | ||
| 60 | |||
| 61 | struct rtree_leaf_elm_s { | ||
| 62 | #ifdef RTREE_LEAF_COMPACT | ||
| 63 | /* | ||
| 64 | * Single pointer-width field containing all of the leaf element fields. | ||
| 65 | * For example, on a 64-bit x64 system with 48 significant virtual | ||
| 66 | * memory address bits, the szind, edata, state, is_head, and slab fields | ||
| 67 | * are packed as follows: | ||
| 68 | * | ||
| 69 | * x: index | ||
| 70 | * e: edata | ||
| 71 | * s: state | ||
| 72 | * h: is_head | ||
| 73 | * b: slab | ||
| 74 | * | ||
| 75 | * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee e00ssshb | ||
| 76 | */ | ||
| 77 | atomic_p_t le_bits; | ||
| 78 | #else | ||
| 79 | atomic_p_t le_edata; /* (edata_t *) */ | ||
| 80 | /* | ||
| 81 | * From high to low bits: szind (8 bits), state (4 bits), is_head, slab | ||
| 82 | */ | ||
| 83 | atomic_u_t le_metadata; | ||
| 84 | #endif | ||
| 85 | }; | ||
| 86 | |||
| 87 | typedef struct rtree_level_s rtree_level_t; | ||
| 88 | struct rtree_level_s { | ||
| 89 | /* Number of key bits distinguished by this level. */ | ||
| 90 | unsigned bits; | ||
| 91 | /* | ||
| 92 | * Cumulative number of key bits distinguished by traversing to | ||
| 93 | * corresponding tree level. | ||
| 94 | */ | ||
| 95 | unsigned cumbits; | ||
| 96 | }; | ||
| 97 | |||
| 98 | typedef struct rtree_s rtree_t; | ||
| 99 | struct rtree_s { | ||
| 100 | base_t *base; | ||
| 101 | malloc_mutex_t init_lock; | ||
| 102 | /* Number of elements based on rtree_levels[0].bits. */ | ||
| 103 | #if RTREE_HEIGHT > 1 | ||
| 104 | rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; | ||
| 105 | #else | ||
| 106 | rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; | ||
| 107 | #endif | ||
| 108 | }; | ||
| 109 | |||
| 110 | /* | ||
| 111 | * Split the bits into one to three partitions depending on number of | ||
| 112 | * significant bits. If the number of bits does not divide evenly into the | ||
| 113 | * number of levels, place one remainder bit per level starting at the leaf | ||
| 114 | * level. | ||
| 115 | */ | ||
| 116 | static const rtree_level_t rtree_levels[] = { | ||
| 117 | #if RTREE_HEIGHT == 1 | ||
| 118 | {RTREE_NSB, RTREE_NHIB + RTREE_NSB} | ||
| 119 | #elif RTREE_HEIGHT == 2 | ||
| 120 | {RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2}, | ||
| 121 | {RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB} | ||
| 122 | #elif RTREE_HEIGHT == 3 | ||
| 123 | {RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3}, | ||
| 124 | {RTREE_NSB/3 + RTREE_NSB%3/2, | ||
| 125 | RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2}, | ||
| 126 | {RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB} | ||
| 127 | #else | ||
| 128 | # error Unsupported rtree height | ||
| 129 | #endif | ||
| 130 | }; | ||
| 131 | |||
| 132 | bool rtree_new(rtree_t *rtree, base_t *base, bool zeroed); | ||
| 133 | |||
| 134 | rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, | ||
| 135 | rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing); | ||
| 136 | |||
| 137 | JEMALLOC_ALWAYS_INLINE unsigned | ||
| 138 | rtree_leaf_maskbits(void) { | ||
| 139 | unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); | ||
| 140 | unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - | ||
| 141 | rtree_levels[RTREE_HEIGHT-1].bits); | ||
| 142 | return ptrbits - cumbits; | ||
| 143 | } | ||
| 144 | |||
| 145 | JEMALLOC_ALWAYS_INLINE uintptr_t | ||
| 146 | rtree_leafkey(uintptr_t key) { | ||
| 147 | uintptr_t mask = ~((ZU(1) << rtree_leaf_maskbits()) - 1); | ||
| 148 | return (key & mask); | ||
| 149 | } | ||
| 150 | |||
| 151 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 152 | rtree_cache_direct_map(uintptr_t key) { | ||
| 153 | return (size_t)((key >> rtree_leaf_maskbits()) & | ||
| 154 | (RTREE_CTX_NCACHE - 1)); | ||
| 155 | } | ||
| 156 | |||
| 157 | JEMALLOC_ALWAYS_INLINE uintptr_t | ||
| 158 | rtree_subkey(uintptr_t key, unsigned level) { | ||
| 159 | unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); | ||
| 160 | unsigned cumbits = rtree_levels[level].cumbits; | ||
| 161 | unsigned shiftbits = ptrbits - cumbits; | ||
| 162 | unsigned maskbits = rtree_levels[level].bits; | ||
| 163 | uintptr_t mask = (ZU(1) << maskbits) - 1; | ||
| 164 | return ((key >> shiftbits) & mask); | ||
| 165 | } | ||
| 166 | |||
| 167 | /* | ||
| 168 | * Atomic getters. | ||
| 169 | * | ||
| 170 | * dependent: Reading a value on behalf of a pointer to a valid allocation | ||
| 171 | * is guaranteed to be a clean read even without synchronization, | ||
| 172 | * because the rtree update became visible in memory before the | ||
| 173 | * pointer came into existence. | ||
| 174 | * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be | ||
| 175 | * dependent on a previous rtree write, which means a stale read | ||
| 176 | * could result if synchronization were omitted here. | ||
| 177 | */ | ||
| 178 | # ifdef RTREE_LEAF_COMPACT | ||
| 179 | JEMALLOC_ALWAYS_INLINE uintptr_t | ||
| 180 | rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, | ||
| 181 | rtree_leaf_elm_t *elm, bool dependent) { | ||
| 182 | return (uintptr_t)atomic_load_p(&elm->le_bits, dependent | ||
| 183 | ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); | ||
| 184 | } | ||
| 185 | |||
| 186 | JEMALLOC_ALWAYS_INLINE uintptr_t | ||
| 187 | rtree_leaf_elm_bits_encode(rtree_contents_t contents) { | ||
| 188 | assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0); | ||
| 189 | uintptr_t edata_bits = (uintptr_t)contents.edata | ||
| 190 | & (((uintptr_t)1 << LG_VADDR) - 1); | ||
| 191 | |||
| 192 | uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR; | ||
| 193 | uintptr_t slab_bits = (uintptr_t)contents.metadata.slab; | ||
| 194 | uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1; | ||
| 195 | uintptr_t state_bits = (uintptr_t)contents.metadata.state << | ||
| 196 | RTREE_LEAF_STATE_SHIFT; | ||
| 197 | uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits | | ||
| 198 | slab_bits; | ||
| 199 | assert((edata_bits & metadata_bits) == 0); | ||
| 200 | |||
| 201 | return edata_bits | metadata_bits; | ||
| 202 | } | ||
| 203 | |||
| 204 | JEMALLOC_ALWAYS_INLINE rtree_contents_t | ||
| 205 | rtree_leaf_elm_bits_decode(uintptr_t bits) { | ||
| 206 | rtree_contents_t contents; | ||
| 207 | /* Do the easy things first. */ | ||
| 208 | contents.metadata.szind = bits >> LG_VADDR; | ||
| 209 | contents.metadata.slab = (bool)(bits & 1); | ||
| 210 | contents.metadata.is_head = (bool)(bits & (1 << 1)); | ||
| 211 | |||
| 212 | uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) >> | ||
| 213 | RTREE_LEAF_STATE_SHIFT; | ||
| 214 | assert(state_bits <= extent_state_max); | ||
| 215 | contents.metadata.state = (extent_state_t)state_bits; | ||
| 216 | |||
| 217 | uintptr_t low_bit_mask = ~((uintptr_t)EDATA_ALIGNMENT - 1); | ||
| 218 | # ifdef __aarch64__ | ||
| 219 | /* | ||
| 220 | * aarch64 doesn't sign extend the highest virtual address bit to set | ||
| 221 | * the higher ones. Instead, the high bits get zeroed. | ||
| 222 | */ | ||
| 223 | uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1; | ||
| 224 | /* Mask off metadata. */ | ||
| 225 | uintptr_t mask = high_bit_mask & low_bit_mask; | ||
| 226 | contents.edata = (edata_t *)(bits & mask); | ||
| 227 | # else | ||
| 228 | /* Restore sign-extended high bits, mask metadata bits. */ | ||
| 229 | contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) | ||
| 230 | >> RTREE_NHIB) & low_bit_mask); | ||
| 231 | # endif | ||
| 232 | assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0); | ||
| 233 | return contents; | ||
| 234 | } | ||
| 235 | |||
| 236 | # endif /* RTREE_LEAF_COMPACT */ | ||
| 237 | |||
| 238 | JEMALLOC_ALWAYS_INLINE rtree_contents_t | ||
| 239 | rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, | ||
| 240 | bool dependent) { | ||
| 241 | #ifdef RTREE_LEAF_COMPACT | ||
| 242 | uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); | ||
| 243 | rtree_contents_t contents = rtree_leaf_elm_bits_decode(bits); | ||
| 244 | return contents; | ||
| 245 | #else | ||
| 246 | rtree_contents_t contents; | ||
| 247 | unsigned metadata_bits = atomic_load_u(&elm->le_metadata, dependent | ||
| 248 | ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); | ||
| 249 | contents.metadata.slab = (bool)(metadata_bits & 1); | ||
| 250 | contents.metadata.is_head = (bool)(metadata_bits & (1 << 1)); | ||
| 251 | |||
| 252 | uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) >> | ||
| 253 | RTREE_LEAF_STATE_SHIFT; | ||
| 254 | assert(state_bits <= extent_state_max); | ||
| 255 | contents.metadata.state = (extent_state_t)state_bits; | ||
| 256 | contents.metadata.szind = metadata_bits >> (RTREE_LEAF_STATE_SHIFT + | ||
| 257 | RTREE_LEAF_STATE_WIDTH); | ||
| 258 | |||
| 259 | contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent | ||
| 260 | ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); | ||
| 261 | |||
| 262 | return contents; | ||
| 263 | #endif | ||
| 264 | } | ||
| 265 | |||
| 266 | JEMALLOC_ALWAYS_INLINE void | ||
| 267 | rtree_contents_encode(rtree_contents_t contents, void **bits, | ||
| 268 | unsigned *additional) { | ||
| 269 | #ifdef RTREE_LEAF_COMPACT | ||
| 270 | *bits = (void *)rtree_leaf_elm_bits_encode(contents); | ||
| 271 | #else | ||
| 272 | *additional = (unsigned)contents.metadata.slab | ||
| 273 | | ((unsigned)contents.metadata.is_head << 1) | ||
| 274 | | ((unsigned)contents.metadata.state << RTREE_LEAF_STATE_SHIFT) | ||
| 275 | | ((unsigned)contents.metadata.szind << (RTREE_LEAF_STATE_SHIFT + | ||
| 276 | RTREE_LEAF_STATE_WIDTH)); | ||
| 277 | *bits = contents.edata; | ||
| 278 | #endif | ||
| 279 | } | ||
| 280 | |||
| 281 | JEMALLOC_ALWAYS_INLINE void | ||
| 282 | rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree, | ||
| 283 | rtree_leaf_elm_t *elm, void *bits, unsigned additional) { | ||
| 284 | #ifdef RTREE_LEAF_COMPACT | ||
| 285 | atomic_store_p(&elm->le_bits, bits, ATOMIC_RELEASE); | ||
| 286 | #else | ||
| 287 | atomic_store_u(&elm->le_metadata, additional, ATOMIC_RELEASE); | ||
| 288 | /* | ||
| 289 | * Write edata last, since the element is atomically considered valid | ||
| 290 | * as soon as the edata field is non-NULL. | ||
| 291 | */ | ||
| 292 | atomic_store_p(&elm->le_edata, bits, ATOMIC_RELEASE); | ||
| 293 | #endif | ||
| 294 | } | ||
| 295 | |||
| 296 | JEMALLOC_ALWAYS_INLINE void | ||
| 297 | rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, | ||
| 298 | rtree_leaf_elm_t *elm, rtree_contents_t contents) { | ||
| 299 | assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0); | ||
| 300 | void *bits; | ||
| 301 | unsigned additional; | ||
| 302 | |||
| 303 | rtree_contents_encode(contents, &bits, &additional); | ||
| 304 | rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional); | ||
| 305 | } | ||
| 306 | |||
| 307 | /* The state field can be updated independently (and more frequently). */ | ||
| 308 | JEMALLOC_ALWAYS_INLINE void | ||
| 309 | rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree, | ||
| 310 | rtree_leaf_elm_t *elm1, rtree_leaf_elm_t *elm2, extent_state_t state) { | ||
| 311 | assert(elm1 != NULL); | ||
| 312 | #ifdef RTREE_LEAF_COMPACT | ||
| 313 | uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm1, | ||
| 314 | /* dependent */ true); | ||
| 315 | bits &= ~RTREE_LEAF_STATE_MASK; | ||
| 316 | bits |= state << RTREE_LEAF_STATE_SHIFT; | ||
| 317 | atomic_store_p(&elm1->le_bits, (void *)bits, ATOMIC_RELEASE); | ||
| 318 | if (elm2 != NULL) { | ||
| 319 | atomic_store_p(&elm2->le_bits, (void *)bits, ATOMIC_RELEASE); | ||
| 320 | } | ||
| 321 | #else | ||
| 322 | unsigned bits = atomic_load_u(&elm1->le_metadata, ATOMIC_RELAXED); | ||
| 323 | bits &= ~RTREE_LEAF_STATE_MASK; | ||
| 324 | bits |= state << RTREE_LEAF_STATE_SHIFT; | ||
| 325 | atomic_store_u(&elm1->le_metadata, bits, ATOMIC_RELEASE); | ||
| 326 | if (elm2 != NULL) { | ||
| 327 | atomic_store_u(&elm2->le_metadata, bits, ATOMIC_RELEASE); | ||
| 328 | } | ||
| 329 | #endif | ||
| 330 | } | ||
| 331 | |||
| 332 | /* | ||
| 333 | * Tries to look up the key in the L1 cache, returning false if there's a hit, or | ||
| 334 | * true if there's a miss. | ||
| 335 | * Key is allowed to be NULL; returns true in this case. | ||
| 336 | */ | ||
| 337 | JEMALLOC_ALWAYS_INLINE bool | ||
| 338 | rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, | ||
| 339 | uintptr_t key, rtree_leaf_elm_t **elm) { | ||
| 340 | size_t slot = rtree_cache_direct_map(key); | ||
| 341 | uintptr_t leafkey = rtree_leafkey(key); | ||
| 342 | assert(leafkey != RTREE_LEAFKEY_INVALID); | ||
| 343 | |||
| 344 | if (unlikely(rtree_ctx->cache[slot].leafkey != leafkey)) { | ||
| 345 | return true; | ||
| 346 | } | ||
| 347 | |||
| 348 | rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf; | ||
| 349 | assert(leaf != NULL); | ||
| 350 | uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); | ||
| 351 | *elm = &leaf[subkey]; | ||
| 352 | |||
| 353 | return false; | ||
| 354 | } | ||
| 355 | |||
| 356 | JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * | ||
| 357 | rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, | ||
| 358 | uintptr_t key, bool dependent, bool init_missing) { | ||
| 359 | assert(key != 0); | ||
| 360 | assert(!dependent || !init_missing); | ||
| 361 | |||
| 362 | size_t slot = rtree_cache_direct_map(key); | ||
| 363 | uintptr_t leafkey = rtree_leafkey(key); | ||
| 364 | assert(leafkey != RTREE_LEAFKEY_INVALID); | ||
| 365 | |||
| 366 | /* Fast path: L1 direct mapped cache. */ | ||
| 367 | if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) { | ||
| 368 | rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf; | ||
| 369 | assert(leaf != NULL); | ||
| 370 | uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); | ||
| 371 | return &leaf[subkey]; | ||
| 372 | } | ||
| 373 | /* | ||
| 374 | * Search the L2 LRU cache. On hit, swap the matching element into the | ||
| 375 | * slot in L1 cache, and move the position in L2 up by 1. | ||
| 376 | */ | ||
| 377 | #define RTREE_CACHE_CHECK_L2(i) do { \ | ||
| 378 | if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \ | ||
| 379 | rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \ | ||
| 380 | assert(leaf != NULL); \ | ||
| 381 | if (i > 0) { \ | ||
| 382 | /* Bubble up by one. */ \ | ||
| 383 | rtree_ctx->l2_cache[i].leafkey = \ | ||
| 384 | rtree_ctx->l2_cache[i - 1].leafkey; \ | ||
| 385 | rtree_ctx->l2_cache[i].leaf = \ | ||
| 386 | rtree_ctx->l2_cache[i - 1].leaf; \ | ||
| 387 | rtree_ctx->l2_cache[i - 1].leafkey = \ | ||
| 388 | rtree_ctx->cache[slot].leafkey; \ | ||
| 389 | rtree_ctx->l2_cache[i - 1].leaf = \ | ||
| 390 | rtree_ctx->cache[slot].leaf; \ | ||
| 391 | } else { \ | ||
| 392 | rtree_ctx->l2_cache[0].leafkey = \ | ||
| 393 | rtree_ctx->cache[slot].leafkey; \ | ||
| 394 | rtree_ctx->l2_cache[0].leaf = \ | ||
| 395 | rtree_ctx->cache[slot].leaf; \ | ||
| 396 | } \ | ||
| 397 | rtree_ctx->cache[slot].leafkey = leafkey; \ | ||
| 398 | rtree_ctx->cache[slot].leaf = leaf; \ | ||
| 399 | uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \ | ||
| 400 | return &leaf[subkey]; \ | ||
| 401 | } \ | ||
| 402 | } while (0) | ||
| 403 | /* Check the first cache entry. */ | ||
| 404 | RTREE_CACHE_CHECK_L2(0); | ||
| 405 | /* Search the remaining cache elements. */ | ||
| 406 | for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) { | ||
| 407 | RTREE_CACHE_CHECK_L2(i); | ||
| 408 | } | ||
| 409 | #undef RTREE_CACHE_CHECK_L2 | ||
| 410 | |||
| 411 | return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key, | ||
| 412 | dependent, init_missing); | ||
| 413 | } | ||
| 414 | |||
| 415 | /* | ||
| 416 | * Returns true on lookup failure. | ||
| 417 | */ | ||
| 418 | static inline bool | ||
| 419 | rtree_read_independent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, | ||
| 420 | uintptr_t key, rtree_contents_t *r_contents) { | ||
| 421 | rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, | ||
| 422 | key, /* dependent */ false, /* init_missing */ false); | ||
| 423 | if (elm == NULL) { | ||
| 424 | return true; | ||
| 425 | } | ||
| 426 | *r_contents = rtree_leaf_elm_read(tsdn, rtree, elm, | ||
| 427 | /* dependent */ false); | ||
| 428 | return false; | ||
| 429 | } | ||
| 430 | |||
| 431 | static inline rtree_contents_t | ||
| 432 | rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, | ||
| 433 | uintptr_t key) { | ||
| 434 | rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, | ||
| 435 | key, /* dependent */ true, /* init_missing */ false); | ||
| 436 | assert(elm != NULL); | ||
| 437 | return rtree_leaf_elm_read(tsdn, rtree, elm, /* dependent */ true); | ||
| 438 | } | ||
| 439 | |||
| 440 | static inline rtree_metadata_t | ||
| 441 | rtree_metadata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, | ||
| 442 | uintptr_t key) { | ||
| 443 | rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, | ||
| 444 | key, /* dependent */ true, /* init_missing */ false); | ||
| 445 | assert(elm != NULL); | ||
| 446 | return rtree_leaf_elm_read(tsdn, rtree, elm, | ||
| 447 | /* dependent */ true).metadata; | ||
| 448 | } | ||
| 449 | |||
| 450 | /* | ||
| 451 | * Returns true when the request cannot be fulfilled by fastpath. | ||
| 452 | */ | ||
| 453 | static inline bool | ||
| 454 | rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, | ||
| 455 | uintptr_t key, rtree_metadata_t *r_rtree_metadata) { | ||
| 456 | rtree_leaf_elm_t *elm; | ||
| 457 | /* | ||
| 458 | * Callers should check the bool return value (lookup success or not) | ||
| 459 | * rather than testing elm == NULL, which would cost an extra branch: | ||
| 460 | * when the cache lookup succeeds, the returned pointer is never NULL, | ||
| 461 | * but the compiler cannot know that. | ||
| 462 | */ | ||
| 463 | if (rtree_leaf_elm_lookup_fast(tsdn, rtree, rtree_ctx, key, &elm)) { | ||
| 464 | return true; | ||
| 465 | } | ||
| 466 | assert(elm != NULL); | ||
| 467 | *r_rtree_metadata = rtree_leaf_elm_read(tsdn, rtree, elm, | ||
| 468 | /* dependent */ true).metadata; | ||
| 469 | return false; | ||
| 470 | } | ||
| 471 | |||
| 472 | JEMALLOC_ALWAYS_INLINE void | ||
| 473 | rtree_write_range_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, | ||
| 474 | uintptr_t base, uintptr_t end, rtree_contents_t contents, bool clearing) { | ||
| 475 | assert((base & PAGE_MASK) == 0 && (end & PAGE_MASK) == 0); | ||
| 476 | /* | ||
| 477 | * Only used for emap_(de)register_interior, which implies the | ||
| 478 | * boundaries have been registered already. Therefore all the lookups | ||
| 479 | * are dependent w/o init_missing, assuming the range spans across at | ||
| 480 | * most 2 rtree leaf nodes (each covers 1 GiB of vaddr). | ||
| 481 | */ | ||
| 482 | void *bits; | ||
| 483 | unsigned additional; | ||
| 484 | rtree_contents_encode(contents, &bits, &additional); | ||
| 485 | |||
| 486 | rtree_leaf_elm_t *elm = NULL; /* Dead store. */ | ||
| 487 | for (uintptr_t addr = base; addr <= end; addr += PAGE) { | ||
| 488 | if (addr == base || | ||
| 489 | (addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) { | ||
| 490 | elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr, | ||
| 491 | /* dependent */ true, /* init_missing */ false); | ||
| 492 | assert(elm != NULL); | ||
| 493 | } | ||
| 494 | assert(elm == rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr, | ||
| 495 | /* dependent */ true, /* init_missing */ false)); | ||
| 496 | assert(!clearing || rtree_leaf_elm_read(tsdn, rtree, elm, | ||
| 497 | /* dependent */ true).edata != NULL); | ||
| 498 | rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional); | ||
| 499 | elm++; | ||
| 500 | } | ||
| 501 | } | ||
| 502 | |||
| 503 | JEMALLOC_ALWAYS_INLINE void | ||
| 504 | rtree_write_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, | ||
| 505 | uintptr_t base, uintptr_t end, rtree_contents_t contents) { | ||
| 506 | rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents, | ||
| 507 | /* clearing */ false); | ||
| 508 | } | ||
| 509 | |||
| 510 | JEMALLOC_ALWAYS_INLINE bool | ||
| 511 | rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, | ||
| 512 | rtree_contents_t contents) { | ||
| 513 | rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, | ||
| 514 | key, /* dependent */ false, /* init_missing */ true); | ||
| 515 | if (elm == NULL) { | ||
| 516 | return true; | ||
| 517 | } | ||
| 518 | |||
| 519 | rtree_leaf_elm_write(tsdn, rtree, elm, contents); | ||
| 520 | |||
| 521 | return false; | ||
| 522 | } | ||
| 523 | |||
| 524 | static inline void | ||
| 525 | rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, | ||
| 526 | uintptr_t key) { | ||
| 527 | rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, | ||
| 528 | key, /* dependent */ true, /* init_missing */ false); | ||
| 529 | assert(elm != NULL); | ||
| 530 | assert(rtree_leaf_elm_read(tsdn, rtree, elm, | ||
| 531 | /* dependent */ true).edata != NULL); | ||
| 532 | rtree_contents_t contents; | ||
| 533 | contents.edata = NULL; | ||
| 534 | contents.metadata.szind = SC_NSIZES; | ||
| 535 | contents.metadata.slab = false; | ||
| 536 | contents.metadata.is_head = false; | ||
| 537 | contents.metadata.state = (extent_state_t)0; | ||
| 538 | rtree_leaf_elm_write(tsdn, rtree, elm, contents); | ||
| 539 | } | ||
| 540 | |||
| 541 | static inline void | ||
| 542 | rtree_clear_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, | ||
| 543 | uintptr_t base, uintptr_t end) { | ||
| 544 | rtree_contents_t contents; | ||
| 545 | contents.edata = NULL; | ||
| 546 | contents.metadata.szind = SC_NSIZES; | ||
| 547 | contents.metadata.slab = false; | ||
| 548 | contents.metadata.is_head = false; | ||
| 549 | contents.metadata.state = (extent_state_t)0; | ||
| 550 | rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents, | ||
| 551 | /* clearing */ true); | ||
| 552 | } | ||
| 553 | |||
| 554 | #endif /* JEMALLOC_INTERNAL_RTREE_H */ | ||
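The level table and key-slicing helpers above are plain arithmetic over a few platform constants. The sketch below recomputes them standalone under the assumption of 64-bit pointers, `LG_VADDR == 48`, and 4 KiB pages (`LG_PAGE == 12`), giving a height-2 tree with 18 key bits per level and leaves that each cover 1 GiB of address space; the example key is arbitrary.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed platform constants; the real values come from jemalloc's config. */
#define LG_SIZEOF_PTR	3
#define LG_VADDR	48
#define LG_PAGE		12

#define RTREE_NHIB	((1U << (LG_SIZEOF_PTR + 3)) - LG_VADDR)	/* 16 */
#define RTREE_NSB	(LG_VADDR - LG_PAGE)				/* 36 -> height 2 */

int
main(void) {
	unsigned ptrbits = 1U << (LG_SIZEOF_PTR + 3);		/* 64 */
	unsigned bits0 = RTREE_NSB / 2;				/* 18 */
	unsigned cum0 = RTREE_NHIB + RTREE_NSB / 2;		/* 34 */
	unsigned bits1 = RTREE_NSB / 2 + RTREE_NSB % 2;		/* 18 */
	unsigned cum1 = RTREE_NHIB + RTREE_NSB;			/* 52 */

	/* rtree_leaf_maskbits(): ptrbits - (cum1 - bits1) == 30, i.e. 1 GiB. */
	unsigned leaf_maskbits = ptrbits - (cum1 - bits1);

	uintptr_t key = (uintptr_t)0x00007f1234567000ULL;	/* page-aligned */
	uintptr_t leafkey = key & ~(((uintptr_t)1 << leaf_maskbits) - 1);
	uintptr_t subkey0 = (key >> (ptrbits - cum0)) &
	    (((uintptr_t)1 << bits0) - 1);
	uintptr_t subkey1 = (key >> (ptrbits - cum1)) &
	    (((uintptr_t)1 << bits1) - 1);

	printf("level 0: bits=%u cumbits=%u; level 1: bits=%u cumbits=%u\n",
	    bits0, cum0, bits1, cum1);
	printf("leafkey=0x%llx subkey0=0x%llx subkey1=0x%llx\n",
	    (unsigned long long)leafkey, (unsigned long long)subkey0,
	    (unsigned long long)subkey1);
	return 0;
}
```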
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/rtree_tsd.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/rtree_tsd.h deleted file mode 100644 index e45525c..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/rtree_tsd.h +++ /dev/null | |||
| @@ -1,62 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_RTREE_CTX_H | ||
| 2 | #define JEMALLOC_INTERNAL_RTREE_CTX_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Number of leafkey/leaf pairs to cache in L1 and L2 level respectively. Each | ||
| 6 | * entry supports an entire leaf, so the cache hit rate is typically high even | ||
| 7 | * with a small number of entries. In rare cases extent activity will straddle | ||
| 8 | * the boundary between two leaf nodes. Furthermore, an arena may use a | ||
| 9 | * combination of dss and mmap. Note that as memory usage grows past the amount | ||
| 10 | * that this cache can directly cover, the cache will become less effective if | ||
| 11 | * locality of reference is low, but the consequence is merely cache misses | ||
| 12 | * while traversing the tree nodes. | ||
| 13 | * | ||
| 14 | * The L1 direct-mapped cache offers consistent, low cost on a cache hit. | ||
| 15 | * However, collisions can reduce the hit rate. This is resolved by | ||
| 16 | * combining it with an L2 LRU cache, which requires a linear search and | ||
| 17 | * re-ordering on access but suffers no collisions. Note that the cache will | ||
| 18 | * itself suffer CPU cache misses if made overly large, plus the cost of the | ||
| 19 | * linear search in the LRU cache. | ||
| 20 | */ | ||
| 21 | #define RTREE_CTX_NCACHE 16 | ||
| 22 | #define RTREE_CTX_NCACHE_L2 8 | ||
| 23 | |||
| 24 | /* Needed for initialization only. */ | ||
| 25 | #define RTREE_LEAFKEY_INVALID ((uintptr_t)1) | ||
| 26 | #define RTREE_CTX_CACHE_ELM_INVALID {RTREE_LEAFKEY_INVALID, NULL} | ||
| 27 | |||
| 28 | #define RTREE_CTX_INIT_ELM_1 RTREE_CTX_CACHE_ELM_INVALID | ||
| 29 | #define RTREE_CTX_INIT_ELM_2 RTREE_CTX_INIT_ELM_1, RTREE_CTX_INIT_ELM_1 | ||
| 30 | #define RTREE_CTX_INIT_ELM_4 RTREE_CTX_INIT_ELM_2, RTREE_CTX_INIT_ELM_2 | ||
| 31 | #define RTREE_CTX_INIT_ELM_8 RTREE_CTX_INIT_ELM_4, RTREE_CTX_INIT_ELM_4 | ||
| 32 | #define RTREE_CTX_INIT_ELM_16 RTREE_CTX_INIT_ELM_8, RTREE_CTX_INIT_ELM_8 | ||
| 33 | |||
| 34 | #define _RTREE_CTX_INIT_ELM_DATA(n) RTREE_CTX_INIT_ELM_##n | ||
| 35 | #define RTREE_CTX_INIT_ELM_DATA(n) _RTREE_CTX_INIT_ELM_DATA(n) | ||
| 36 | |||
| 37 | /* | ||
| 38 | * Static initializer (to invalidate the cache entries) is required because the | ||
| 39 | * free fastpath may access the rtree cache before a full tsd initialization. | ||
| 40 | */ | ||
| 41 | #define RTREE_CTX_INITIALIZER {{RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE)}, \ | ||
| 42 | {RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE_L2)}} | ||
| 43 | |||
| 44 | typedef struct rtree_leaf_elm_s rtree_leaf_elm_t; | ||
| 45 | |||
| 46 | typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t; | ||
| 47 | struct rtree_ctx_cache_elm_s { | ||
| 48 | uintptr_t leafkey; | ||
| 49 | rtree_leaf_elm_t *leaf; | ||
| 50 | }; | ||
| 51 | |||
| 52 | typedef struct rtree_ctx_s rtree_ctx_t; | ||
| 53 | struct rtree_ctx_s { | ||
| 54 | /* Direct mapped cache. */ | ||
| 55 | rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE]; | ||
| 56 | /* L2 LRU cache. */ | ||
| 57 | rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2]; | ||
| 58 | }; | ||
| 59 | |||
| 60 | void rtree_ctx_data_init(rtree_ctx_t *ctx); | ||
| 61 | |||
| 62 | #endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */ | ||
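A simplified, standalone model of the policy described above: a direct-mapped L1 probed first, then a small L2 searched linearly, with the hit "bubbled up" by one position and promoted into L1. The sizes match the defaults above, but the key and leaf types are toy stand-ins, not the jemalloc structures.

```c
#include <stdbool.h>
#include <stdio.h>

#define NCACHE		16	/* Mirrors RTREE_CTX_NCACHE. */
#define NCACHE_L2	8	/* Mirrors RTREE_CTX_NCACHE_L2. */

typedef struct {
	unsigned long leafkey;
	int leaf;		/* Stand-in for the rtree_leaf_elm_t pointer. */
} elm_t;

static elm_t l1[NCACHE];	/* Direct-mapped: one slot per hashed key. */
static elm_t l2[NCACHE_L2];	/* LRU-ish: searched front to back. */

static bool
cache_lookup(unsigned long leafkey, unsigned slot, int *leaf) {
	if (l1[slot].leafkey == leafkey) {		/* L1 hit. */
		*leaf = l1[slot].leaf;
		return true;
	}
	for (unsigned i = 0; i < NCACHE_L2; i++) {	/* L2 linear search. */
		if (l2[i].leafkey != leafkey) {
			continue;
		}
		elm_t hit = l2[i];
		/* Displaced L1 entry drops into L2; the hit moves up by one. */
		if (i > 0) {
			l2[i] = l2[i - 1];
			l2[i - 1] = l1[slot];
		} else {
			l2[0] = l1[slot];
		}
		l1[slot] = hit;				/* Promote into L1. */
		*leaf = hit.leaf;
		return true;
	}
	return false;	/* Miss: the real code falls back to a tree walk. */
}

int
main(void) {
	int leaf = -1;
	l2[3] = (elm_t){0x40000000UL, 7};
	printf("hit=%d leaf=%d\n", cache_lookup(0x40000000UL, 5, &leaf), leaf);
	return 0;
}
```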
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/safety_check.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/safety_check.h deleted file mode 100644 index f1a74f1..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/safety_check.h +++ /dev/null | |||
| @@ -1,31 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H | ||
| 2 | #define JEMALLOC_INTERNAL_SAFETY_CHECK_H | ||
| 3 | |||
| 4 | void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr, | ||
| 5 | size_t true_size, size_t input_size); | ||
| 6 | void safety_check_fail(const char *format, ...); | ||
| 7 | |||
| 8 | typedef void (*safety_check_abort_hook_t)(const char *message); | ||
| 9 | |||
| 10 | /* Can set to NULL for a default. */ | ||
| 11 | void safety_check_set_abort(safety_check_abort_hook_t abort_fn); | ||
| 12 | |||
| 13 | JEMALLOC_ALWAYS_INLINE void | ||
| 14 | safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) { | ||
| 15 | assert(usize < bumped_usize); | ||
| 16 | for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) { | ||
| 17 | *((unsigned char *)ptr + i) = 0xBC; | ||
| 18 | } | ||
| 19 | } | ||
| 20 | |||
| 21 | JEMALLOC_ALWAYS_INLINE void | ||
| 22 | safety_check_verify_redzone(const void *ptr, size_t usize, size_t bumped_usize) | ||
| 23 | { | ||
| 24 | for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) { | ||
| 25 | if (unlikely(*((unsigned char *)ptr + i) != 0xBC)) { | ||
| 26 | safety_check_fail("Use after free error\n"); | ||
| 27 | } | ||
| 28 | } | ||
| 29 | } | ||
| 30 | |||
| 31 | #endif /*JEMALLOC_INTERNAL_SAFETY_CHECK_H */ | ||
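A self-contained sketch of the redzone scheme implemented by the two inline helpers above: when an allocation of `usize` bytes is bumped to a larger usable size, up to 32 bytes of the slack are filled with 0xBC at allocation time and re-checked on deallocation. The buffer and sizes below are arbitrary stand-ins for jemalloc's internal ones.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void) {
	size_t usize = 24, bumped_usize = 64;
	unsigned char *ptr = calloc(1, bumped_usize);
	if (ptr == NULL) {
		return 1;
	}

	/* Equivalent of safety_check_set_redzone(). */
	for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
		ptr[i] = 0xBC;
	}

	memset(ptr, 0xAB, usize);	/* Writes inside [0, usize) are fine... */
	ptr[usize + 3] = 0;		/* ...but this overflow hits the redzone. */

	/* Equivalent of safety_check_verify_redzone(). */
	for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
		if (ptr[i] != 0xBC) {
			fprintf(stderr, "redzone corrupted at offset %zu\n", i);
			break;
		}
	}
	free(ptr);
	return 0;
}
```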
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/san.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/san.h deleted file mode 100644 index 8813d6b..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/san.h +++ /dev/null | |||
| @@ -1,191 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_GUARD_H | ||
| 2 | #define JEMALLOC_INTERNAL_GUARD_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/ehooks.h" | ||
| 5 | #include "jemalloc/internal/emap.h" | ||
| 6 | |||
| 7 | #define SAN_PAGE_GUARD PAGE | ||
| 8 | #define SAN_PAGE_GUARDS_SIZE (SAN_PAGE_GUARD * 2) | ||
| 9 | |||
| 10 | #define SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT 0 | ||
| 11 | #define SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT 0 | ||
| 12 | |||
| 13 | #define SAN_LG_UAF_ALIGN_DEFAULT (-1) | ||
| 14 | #define SAN_CACHE_BIN_NONFAST_MASK_DEFAULT (uintptr_t)(-1) | ||
| 15 | |||
| 16 | static const uintptr_t uaf_detect_junk = (uintptr_t)0x5b5b5b5b5b5b5b5bULL; | ||
| 17 | |||
| 18 | /* 0 means disabled, i.e. never guarded. */ | ||
| 19 | extern size_t opt_san_guard_large; | ||
| 20 | extern size_t opt_san_guard_small; | ||
| 21 | /* -1 means disabled, i.e. never check for use-after-free. */ | ||
| 22 | extern ssize_t opt_lg_san_uaf_align; | ||
| 23 | |||
| 24 | void san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, | ||
| 25 | emap_t *emap, bool left, bool right, bool remap); | ||
| 26 | void san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, | ||
| 27 | emap_t *emap, bool left, bool right); | ||
| 28 | /* | ||
| 29 | * Unguard the extent, but don't modify emap boundaries. Must be called on an | ||
| 30 | * extent that has been erased from emap and shouldn't be placed back. | ||
| 31 | */ | ||
| 32 | void san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, | ||
| 33 | edata_t *edata, emap_t *emap); | ||
| 34 | void san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize); | ||
| 35 | |||
| 36 | void tsd_san_init(tsd_t *tsd); | ||
| 37 | void san_init(ssize_t lg_san_uaf_align); | ||
| 38 | |||
| 39 | static inline void | ||
| 40 | san_guard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, | ||
| 41 | emap_t *emap, bool remap) { | ||
| 42 | san_guard_pages(tsdn, ehooks, edata, emap, true, true, remap); | ||
| 43 | } | ||
| 44 | |||
| 45 | static inline void | ||
| 46 | san_unguard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, | ||
| 47 | emap_t *emap) { | ||
| 48 | san_unguard_pages(tsdn, ehooks, edata, emap, true, true); | ||
| 49 | } | ||
| 50 | |||
| 51 | static inline size_t | ||
| 52 | san_two_side_unguarded_sz(size_t size) { | ||
| 53 | assert(size % PAGE == 0); | ||
| 54 | assert(size >= SAN_PAGE_GUARDS_SIZE); | ||
| 55 | return size - SAN_PAGE_GUARDS_SIZE; | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline size_t | ||
| 59 | san_two_side_guarded_sz(size_t size) { | ||
| 60 | assert(size % PAGE == 0); | ||
| 61 | return size + SAN_PAGE_GUARDS_SIZE; | ||
| 62 | } | ||
| 63 | |||
| 64 | static inline size_t | ||
| 65 | san_one_side_unguarded_sz(size_t size) { | ||
| 66 | assert(size % PAGE == 0); | ||
| 67 | assert(size >= SAN_PAGE_GUARD); | ||
| 68 | return size - SAN_PAGE_GUARD; | ||
| 69 | } | ||
| 70 | |||
| 71 | static inline size_t | ||
| 72 | san_one_side_guarded_sz(size_t size) { | ||
| 73 | assert(size % PAGE == 0); | ||
| 74 | return size + SAN_PAGE_GUARD; | ||
| 75 | } | ||
| 76 | |||
| 77 | static inline bool | ||
| 78 | san_guard_enabled(void) { | ||
| 79 | return (opt_san_guard_large != 0 || opt_san_guard_small != 0); | ||
| 80 | } | ||
| 81 | |||
| 82 | static inline bool | ||
| 83 | san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size, | ||
| 84 | size_t alignment) { | ||
| 85 | if (opt_san_guard_large == 0 || ehooks_guard_will_fail(ehooks) || | ||
| 86 | tsdn_null(tsdn)) { | ||
| 87 | return false; | ||
| 88 | } | ||
| 89 | |||
| 90 | tsd_t *tsd = tsdn_tsd(tsdn); | ||
| 91 | uint64_t n = tsd_san_extents_until_guard_large_get(tsd); | ||
| 92 | assert(n >= 1); | ||
| 93 | if (n > 1) { | ||
| 94 | /* | ||
| 95 | * Subtract conditionally because the guard may not happen due | ||
| 96 | * to alignment or size restriction below. | ||
| 97 | */ | ||
| 98 | *tsd_san_extents_until_guard_largep_get(tsd) = n - 1; | ||
| 99 | } | ||
| 100 | |||
| 101 | if (n == 1 && (alignment <= PAGE) && | ||
| 102 | (san_two_side_guarded_sz(size) <= SC_LARGE_MAXCLASS)) { | ||
| 103 | *tsd_san_extents_until_guard_largep_get(tsd) = | ||
| 104 | opt_san_guard_large; | ||
| 105 | return true; | ||
| 106 | } else { | ||
| 107 | assert(tsd_san_extents_until_guard_large_get(tsd) >= 1); | ||
| 108 | return false; | ||
| 109 | } | ||
| 110 | } | ||
| 111 | |||
| 112 | static inline bool | ||
| 113 | san_slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) { | ||
| 114 | if (opt_san_guard_small == 0 || ehooks_guard_will_fail(ehooks) || | ||
| 115 | tsdn_null(tsdn)) { | ||
| 116 | return false; | ||
| 117 | } | ||
| 118 | |||
| 119 | tsd_t *tsd = tsdn_tsd(tsdn); | ||
| 120 | uint64_t n = tsd_san_extents_until_guard_small_get(tsd); | ||
| 121 | assert(n >= 1); | ||
| 122 | if (n == 1) { | ||
| 123 | *tsd_san_extents_until_guard_smallp_get(tsd) = | ||
| 124 | opt_san_guard_small; | ||
| 125 | return true; | ||
| 126 | } else { | ||
| 127 | *tsd_san_extents_until_guard_smallp_get(tsd) = n - 1; | ||
| 128 | assert(tsd_san_extents_until_guard_small_get(tsd) >= 1); | ||
| 129 | return false; | ||
| 130 | } | ||
| 131 | } | ||
| 132 | |||
| 133 | static inline void | ||
| 134 | san_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid, | ||
| 135 | void **last) { | ||
| 136 | size_t ptr_sz = sizeof(void *); | ||
| 137 | |||
| 138 | *first = ptr; | ||
| 139 | |||
| 140 | *mid = (void *)((uintptr_t)ptr + ((usize >> 1) & ~(ptr_sz - 1))); | ||
| 141 | assert(*first != *mid || usize == ptr_sz); | ||
| 142 | assert((uintptr_t)*first <= (uintptr_t)*mid); | ||
| 143 | |||
| 144 | /* | ||
| 145 | * When usize > 32K, the gap between requested_size and usize might be | ||
| 146 | * greater than 4K -- this means the last write may access an | ||
| 147 | * likely-untouched page (default settings w/ 4K pages). However by | ||
| 148 | * default the tcache only goes up to the 32K size class, and is usually | ||
| 149 | * tuned lower instead of higher, which makes it less of a concern. | ||
| 150 | */ | ||
| 151 | *last = (void *)((uintptr_t)ptr + usize - sizeof(uaf_detect_junk)); | ||
| 152 | assert(*first != *last || usize == ptr_sz); | ||
| 153 | assert(*mid != *last || usize <= ptr_sz * 2); | ||
| 154 | assert((uintptr_t)*mid <= (uintptr_t)*last); | ||
| 155 | } | ||
| 156 | |||
| 157 | static inline bool | ||
| 158 | san_junk_ptr_should_slow(void) { | ||
| 159 | /* | ||
| 160 | * The latter condition (pointer size greater than the min size class) | ||
| 161 | * is not expected -- fall back to the slow path for simplicity. | ||
| 162 | */ | ||
| 163 | return config_debug || (LG_SIZEOF_PTR > SC_LG_TINY_MIN); | ||
| 164 | } | ||
| 165 | |||
| 166 | static inline void | ||
| 167 | san_junk_ptr(void *ptr, size_t usize) { | ||
| 168 | if (san_junk_ptr_should_slow()) { | ||
| 169 | memset(ptr, (char)uaf_detect_junk, usize); | ||
| 170 | return; | ||
| 171 | } | ||
| 172 | |||
| 173 | void *first, *mid, *last; | ||
| 174 | san_junk_ptr_locations(ptr, usize, &first, &mid, &last); | ||
| 175 | *(uintptr_t *)first = uaf_detect_junk; | ||
| 176 | *(uintptr_t *)mid = uaf_detect_junk; | ||
| 177 | *(uintptr_t *)last = uaf_detect_junk; | ||
| 178 | } | ||
| 179 | |||
| 180 | static inline bool | ||
| 181 | san_uaf_detection_enabled(void) { | ||
| 182 | bool ret = config_uaf_detection && (opt_lg_san_uaf_align != -1); | ||
| 183 | if (config_uaf_detection && ret) { | ||
| 184 | assert(san_cache_bin_nonfast_mask == ((uintptr_t)1 << | ||
| 185 | opt_lg_san_uaf_align) - 1); | ||
| 186 | } | ||
| 187 | |||
| 188 | return ret; | ||
| 189 | } | ||
| 190 | |||
| 191 | #endif /* JEMALLOC_INTERNAL_GUARD_H */ | ||
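A worked example of the `san_junk_ptr_locations()` arithmetic above, recomputed standalone: on the fast path with 8-byte pointers, the junk pattern is written at the start, the pointer-aligned midpoint, and the last word of the allocation. The address and size below are arbitrary.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
	uintptr_t ptr = 0x1000;		/* Hypothetical allocation address. */
	size_t usize = 4096;
	size_t ptr_sz = sizeof(void *);	/* 8 on LP64 platforms. */

	uintptr_t first = ptr;
	uintptr_t mid = ptr + ((usize >> 1) & ~(ptr_sz - 1));
	uintptr_t last = ptr + usize - ptr_sz;

	/* Expected output: first=0x1000 mid=0x1800 last=0x1ff8. */
	printf("first=0x%llx mid=0x%llx last=0x%llx\n",
	    (unsigned long long)first, (unsigned long long)mid,
	    (unsigned long long)last);
	return 0;
}
```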
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/san_bump.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/san_bump.h deleted file mode 100644 index 8ec4a71..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/san_bump.h +++ /dev/null | |||
| @@ -1,52 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_SAN_BUMP_H | ||
| 2 | #define JEMALLOC_INTERNAL_SAN_BUMP_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/edata.h" | ||
| 5 | #include "jemalloc/internal/exp_grow.h" | ||
| 6 | #include "jemalloc/internal/mutex.h" | ||
| 7 | |||
| 8 | #define SBA_RETAINED_ALLOC_SIZE ((size_t)4 << 20) | ||
| 9 | |||
| 10 | extern bool opt_retain; | ||
| 11 | |||
| 12 | typedef struct ehooks_s ehooks_t; | ||
| 13 | typedef struct pac_s pac_t; | ||
| 14 | |||
| 15 | typedef struct san_bump_alloc_s san_bump_alloc_t; | ||
| 16 | struct san_bump_alloc_s { | ||
| 17 | malloc_mutex_t mtx; | ||
| 18 | |||
| 19 | edata_t *curr_reg; | ||
| 20 | }; | ||
| 21 | |||
| 22 | static inline bool | ||
| 23 | san_bump_enabled() { | ||
| 24 | /* | ||
| 25 | * We enable the san_bump allocator only when it's possible to break up a | ||
| 26 | * mapping and unmap a part of it (maps_coalesce). This is needed to | ||
| 27 | * ensure the arena destruction process can destroy all retained guarded | ||
| 28 | * extents one by one and to unmap a trailing part of a retained guarded | ||
| 29 | * region when it's too small to fit a pending allocation. | ||
| 30 | * opt_retain is required, because this allocator retains a large | ||
| 31 | * virtual memory mapping and returns smaller parts of it. | ||
| 32 | */ | ||
| 33 | return maps_coalesce && opt_retain; | ||
| 34 | } | ||
| 35 | |||
| 36 | static inline bool | ||
| 37 | san_bump_alloc_init(san_bump_alloc_t* sba) { | ||
| 38 | bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator", | ||
| 39 | WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive); | ||
| 40 | if (err) { | ||
| 41 | return true; | ||
| 42 | } | ||
| 43 | sba->curr_reg = NULL; | ||
| 44 | |||
| 45 | return false; | ||
| 46 | } | ||
| 47 | |||
| 48 | edata_t * | ||
| 49 | san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, ehooks_t *ehooks, | ||
| 50 | size_t size, bool zero); | ||
| 51 | |||
| 52 | #endif /* JEMALLOC_INTERNAL_SAN_BUMP_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/sc.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/sc.h deleted file mode 100644 index 9bab347..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/sc.h +++ /dev/null | |||
| @@ -1,357 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_SC_H | ||
| 2 | #define JEMALLOC_INTERNAL_SC_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/jemalloc_internal_types.h" | ||
| 5 | |||
| 6 | /* | ||
| 7 | * Size class computations: | ||
| 8 | * | ||
| 9 | * These are a little tricky; we'll first start by describing how things | ||
| 10 | * generally work, and then describe some of the details. | ||
| 11 | * | ||
| 12 | * Ignore the first few size classes for a moment. We can then split all the | ||
| 13 | * remaining size classes into groups. The size classes in a group are spaced | ||
| 14 | * such that they cover allocation request sizes in a power-of-2 range. The | ||
| 15 | * power of two is called the base of the group, and the size classes in it | ||
| 16 | * satisfy allocations in the half-open range (base, base * 2]. There are | ||
| 17 | * SC_NGROUP size classes in each group, equally spaced in the range, so that | ||
| 18 | * each one covers allocations for base / SC_NGROUP possible allocation sizes. | ||
| 19 | * We call that value (base / SC_NGROUP) the delta of the group. Each size class | ||
| 20 | * is delta larger than the one before it (including the initial size class in a | ||
| 21 | * group, which is delta larger than base, the largest size class in the | ||
| 22 | * previous group). | ||
| 23 | * To make the math all work out nicely, we require that SC_NGROUP is a power of | ||
| 24 | * two, and define it in terms of SC_LG_NGROUP. We'll often talk in terms of | ||
| 25 | * lg_base and lg_delta. For each of these groups then, we have that | ||
| 26 | * lg_delta == lg_base - SC_LG_NGROUP. | ||
| 27 | * The size classes in a group with a given lg_base and lg_delta (which, recall, | ||
| 28 | * can be computed from lg_base for these groups) are therefore: | ||
| 29 | * base + 1 * delta | ||
| 30 | * which covers allocations in (base, base + 1 * delta] | ||
| 31 | * base + 2 * delta | ||
| 32 | * which covers allocations in (base + 1 * delta, base + 2 * delta]. | ||
| 33 | * base + 3 * delta | ||
| 34 | * which covers allocations in (base + 2 * delta, base + 3 * delta]. | ||
| 35 | * ... | ||
| 36 | * base + SC_NGROUP * delta ( == 2 * base) | ||
| 37 | * which covers allocations in (base + (SC_NGROUP - 1) * delta, 2 * base]. | ||
| 38 | * (Note that currently SC_NGROUP is always 4, so the "..." is empty in | ||
| 39 | * practice.) | ||
| 40 | * Note that the last size class in the group is the next power of two (after | ||
| 41 | * base), so that we've set up the induction correctly for the next group's | ||
| 42 | * selection of delta. | ||
| 43 | * | ||
| 44 | * Now, let's start considering the first few size classes. Two extra constants | ||
| 45 | * come into play here: LG_QUANTUM and SC_LG_TINY_MIN. LG_QUANTUM ensures | ||
| 46 | * correct platform alignment; all objects of size (1 << LG_QUANTUM) or larger | ||
| 47 | * are at least (1 << LG_QUANTUM) aligned; this can be used to ensure that we | ||
| 48 | * never return improperly aligned memory, by making (1 << LG_QUANTUM) equal the | ||
| 49 | * highest required alignment of a platform. For allocation sizes smaller than | ||
| 50 | * (1 << LG_QUANTUM) though, we can be more relaxed (since we don't support | ||
| 51 | * platforms with types with alignment larger than their size). To allow such | ||
| 52 | * allocations (without wasting space unnecessarily), we introduce tiny size | ||
| 53 | * classes; one per power of two, up until we hit the quantum size. There are | ||
| 54 | * therefore LG_QUANTUM - SC_LG_TINY_MIN such size classes. | ||
| 55 | * | ||
| 56 | * Next, we have a size class of size (1 << LG_QUANTUM). This can't be the | ||
| 57 | * start of a group in the sense we described above (covering a power of two | ||
| 58 | * range) since, if we divided into it to pick a value of delta, we'd get a | ||
| 59 | * delta smaller than (1 << LG_QUANTUM) for sizes >= (1 << LG_QUANTUM), which | ||
| 60 | * is against the rules. | ||
| 61 | * | ||
| 62 | * The first base we can divide by SC_NGROUP while still being at least | ||
| 63 | * (1 << LG_QUANTUM) is SC_NGROUP * (1 << LG_QUANTUM). We can get there by | ||
| 64 | * having SC_NGROUP size classes, spaced (1 << LG_QUANTUM) apart. These size | ||
| 65 | * classes are: | ||
| 66 | * 1 * (1 << LG_QUANTUM) | ||
| 67 | * 2 * (1 << LG_QUANTUM) | ||
| 68 | * 3 * (1 << LG_QUANTUM) | ||
| 69 | * ... (although, as above, this "..." is empty in practice) | ||
| 70 | * SC_NGROUP * (1 << LG_QUANTUM). | ||
| 71 | * | ||
| 72 | * There are SC_NGROUP of these size classes, so we can regard it as a sort of | ||
| 73 | * pseudo-group, even though it spans multiple powers of 2, is divided | ||
| 74 | * differently, and both starts and ends on a power of 2 (as opposed to just | ||
| 75 | * ending). SC_NGROUP is itself a power of two, so the first group after the | ||
| 76 | * pseudo-group has the power-of-two base SC_NGROUP * (1 << LG_QUANTUM), for a | ||
| 77 | * lg_base of LG_QUANTUM + SC_LG_NGROUP. We can divide this base into SC_NGROUP | ||
| 78 | * sizes without violating our LG_QUANTUM requirements, so we can safely set | ||
| 79 | * lg_delta = lg_base - SC_LG_NGROUP (== LG_QUANTUM). | ||
| 80 | * | ||
| 81 | * So, in order, the size classes are: | ||
| 82 | * | ||
| 83 | * Tiny size classes: | ||
| 84 | * - Count: LG_QUANTUM - SC_LG_TINY_MIN. | ||
| 85 | * - Sizes: | ||
| 86 | * 1 << SC_LG_TINY_MIN | ||
| 87 | * 1 << (SC_LG_TINY_MIN + 1) | ||
| 88 | * 1 << (SC_LG_TINY_MIN + 2) | ||
| 89 | * ... | ||
| 90 | * 1 << (LG_QUANTUM - 1) | ||
| 91 | * | ||
| 92 | * Initial pseudo-group: | ||
| 93 | * - Count: SC_NGROUP | ||
| 94 | * - Sizes: | ||
| 95 | * 1 * (1 << LG_QUANTUM) | ||
| 96 | * 2 * (1 << LG_QUANTUM) | ||
| 97 | * 3 * (1 << LG_QUANTUM) | ||
| 98 | * ... | ||
| 99 | * SC_NGROUP * (1 << LG_QUANTUM) | ||
| 100 | * | ||
| 101 | * Regular group 0: | ||
| 102 | * - Count: SC_NGROUP | ||
| 103 | * - Sizes: | ||
| 104 | * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP and lg_delta of | ||
| 105 | * lg_base - SC_LG_NGROUP) | ||
| 106 | * (1 << lg_base) + 1 * (1 << lg_delta) | ||
| 107 | * (1 << lg_base) + 2 * (1 << lg_delta) | ||
| 108 | * (1 << lg_base) + 3 * (1 << lg_delta) | ||
| 109 | * ... | ||
| 110 | * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ] | ||
| 111 | * | ||
| 112 | * Regular group 1: | ||
| 113 | * - Count: SC_NGROUP | ||
| 114 | * - Sizes: | ||
| 115 | * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + 1 and lg_delta of | ||
| 116 | * lg_base - SC_LG_NGROUP) | ||
| 117 | * (1 << lg_base) + 1 * (1 << lg_delta) | ||
| 118 | * (1 << lg_base) + 2 * (1 << lg_delta) | ||
| 119 | * (1 << lg_base) + 3 * (1 << lg_delta) | ||
| 120 | * ... | ||
| 121 | * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ] | ||
| 122 | * | ||
| 123 | * ... | ||
| 124 | * | ||
| 125 | * Regular group N: | ||
| 126 | * - Count: SC_NGROUP | ||
| 127 | * - Sizes: | ||
| 128 | * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + N and lg_delta of | ||
| 129 | * lg_base - SC_LG_NGROUP) | ||
| 130 | * (1 << lg_base) + 1 * (1 << lg_delta) | ||
| 131 | * (1 << lg_base) + 2 * (1 << lg_delta) | ||
| 132 | * (1 << lg_base) + 3 * (1 << lg_delta) | ||
| 133 | * ... | ||
| 134 | * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ] | ||
| 135 | * | ||
| 136 | * | ||
| 137 | * Representation of metadata: | ||
| 138 | * To make the math easy, we'll mostly work in lg quantities. We record lg_base, | ||
| 139 | * lg_delta, and ndelta (i.e. number of deltas above the base) on a | ||
| 140 | * per-size-class basis, and maintain the invariant that, across all size | ||
| 141 | * classes, size == (1 << lg_base) + ndelta * (1 << lg_delta). | ||
| 142 | * | ||
| 143 | * For regular groups (i.e. those with lg_base >= LG_QUANTUM + SC_LG_NGROUP), | ||
| 144 | * lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP. | ||
| 145 | * | ||
| 146 | * For the initial tiny size classes (if any), lg_base is lg(size class size). | ||
| 147 | * lg_delta is lg_base for the first size class, and lg_base - 1 for all | ||
| 148 | * subsequent ones. ndelta is always 0. | ||
| 149 | * | ||
| 150 | * For the pseudo-group, if there are no tiny size classes, then we set | ||
| 151 | * lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0 | ||
| 152 | * to SC_NGROUP - 1. (Note that delta == base, so base + (SC_NGROUP - 1) * delta | ||
| 153 | * is just SC_NGROUP * base, or (1 << (SC_LG_NGROUP + LG_QUANTUM)), so we do | ||
| 154 | * indeed get a power of two that way). If there *are* tiny size classes, then | ||
| 155 | * the first size class needs to have lg_delta relative to the largest tiny size | ||
| 156 | * class. We therefore set lg_base == LG_QUANTUM - 1, | ||
| 157 | * lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the | ||
| 158 | * pseudo-group the same. | ||
| 159 | * | ||
| 160 | * | ||
| 161 | * Other terminology: | ||
| 162 | * "Small" size classes mean those that are allocated out of bins, which is the | ||
| 163 | * same as those that are slab allocated. | ||
| 164 | * "Large" size classes are those that are not small. The cutoff for counting as | ||
| 165 | * large is page size * group size. | ||
| 166 | */ | ||
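A minimal standalone sketch of the invariant described above, size == (1 << lg_base) + ndelta * (1 << lg_delta). The helper name sc_size_from_triple is hypothetical; jemalloc's own reg_size_compute(), declared near the end of this header, plays the same role.

    #include <assert.h>
    #include <stddef.h>

    /* Rebuild a size class from its (lg_base, lg_delta, ndelta) triple. */
    static size_t
    sc_size_from_triple(int lg_base, int lg_delta, int ndelta) {
        return ((size_t)1 << lg_base) +
            (size_t)ndelta * ((size_t)1 << lg_delta);
    }

    int
    main(void) {
        /* A regular group with lg_base = 6, lg_delta = 4 (SC_LG_NGROUP == 2):
         * sizes 80, 96, 112, 128 cover requests in (64, 128]. */
        assert(sc_size_from_triple(6, 4, 1) == 80);
        assert(sc_size_from_triple(6, 4, 4) == 128);
        return 0;
    }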
| 167 | |||
| 168 | /* | ||
| 169 | * Size class N + (1 << SC_LG_NGROUP) is twice the size of size class N. | ||
| 170 | */ | ||
| 171 | #define SC_LG_NGROUP 2 | ||
| 172 | #define SC_LG_TINY_MIN 3 | ||
| 173 | |||
| 174 | #if SC_LG_TINY_MIN == 0 | ||
| 175 | /* The div module doesn't support division by 1, which this would require. */ | ||
| 176 | #error "Unsupported LG_TINY_MIN" | ||
| 177 | #endif | ||
| 178 | |||
| 179 | /* | ||
| 180 | * The definitions below are all determined by the above settings and system | ||
| 181 | * characteristics. | ||
| 182 | */ | ||
| 183 | #define SC_NGROUP (1ULL << SC_LG_NGROUP) | ||
| 184 | #define SC_PTR_BITS ((1ULL << LG_SIZEOF_PTR) * 8) | ||
| 185 | #define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN) | ||
| 186 | #define SC_LG_TINY_MAXCLASS (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1) | ||
| 187 | #define SC_NPSEUDO SC_NGROUP | ||
| 188 | #define SC_LG_FIRST_REGULAR_BASE (LG_QUANTUM + SC_LG_NGROUP) | ||
| 189 | /* | ||
| 190 | * We cap allocations to be less than 2 ** (ptr_bits - 1), so the highest base | ||
| 191 | * we need is 2 ** (ptr_bits - 2). (This also means that the last group is 1 | ||
| 192 | * size class shorter than the others). | ||
| 193 | * We could probably save some space in arenas by capping this at LG_VADDR size. | ||
| 194 | */ | ||
| 195 | #define SC_LG_BASE_MAX (SC_PTR_BITS - 2) | ||
| 196 | #define SC_NREGULAR (SC_NGROUP * \ | ||
| 197 | (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1) | ||
| 198 | #define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR) | ||
| 199 | |||
| 200 | /* | ||
| 201 | * The number of size classes that are a multiple of the page size. | ||
| 202 | * | ||
| 203 | * Here are the first few bases that have a page-sized SC. | ||
| 204 | * | ||
| 205 | * lg(base) | base | highest SC | page-multiple SCs | ||
| 206 | * --------------|------------------------------------------ | ||
| 207 | * LG_PAGE - 1 | PAGE / 2 | PAGE | 1 | ||
| 208 | * LG_PAGE | PAGE | 2 * PAGE | 1 | ||
| 209 | * LG_PAGE + 1 | 2 * PAGE | 4 * PAGE | 2 | ||
| 210 | * LG_PAGE + 2 | 4 * PAGE | 8 * PAGE | 4 | ||
| 211 | * | ||
| 212 | * The number of page-multiple SCs continues to grow in powers of two, up until | ||
| 213 | * lg_delta == lg_page, which corresponds to setting lg_base to lg_page + | ||
| 214 | * SC_LG_NGROUP. So, then, the number of size classes that are multiples of the | ||
| 215 | * page size whose delta is less than the page size is | ||
| 216 | * 1 + (2**0 + 2**1 + ... + 2**(lg_ngroup - 1)) == 2**lg_ngroup. | ||
| 217 | * | ||
| 218 | * For each base with lg_base in [lg_page + lg_ngroup, lg_base_max), there are | ||
| 219 | * NGROUP page-sized size classes, and when lg_base == lg_base_max, there are | ||
| 220 | * NGROUP - 1. | ||
| 221 | * | ||
| 222 | * This gives us the quantity we seek. | ||
| 223 | */ | ||
| 224 | #define SC_NPSIZES ( \ | ||
| 225 | SC_NGROUP \ | ||
| 226 | + (SC_LG_BASE_MAX - (LG_PAGE + SC_LG_NGROUP)) * SC_NGROUP \ | ||
| 227 | + SC_NGROUP - 1) | ||
| 228 | |||
| 229 | /* | ||
| 230 | * We declare a size class binnable if size < page size * group size. Or, in other | ||
| 231 | * words, lg(size) < lg(page size) + lg(group size). | ||
| 232 | */ | ||
| 233 | #define SC_NBINS ( \ | ||
| 234 | /* Sub-regular size classes. */ \ | ||
| 235 | SC_NTINY + SC_NPSEUDO \ | ||
| 236 | /* Groups with lg_regular_min_base <= lg_base <= lg_base_max */ \ | ||
| 237 | + SC_NGROUP * (LG_PAGE + SC_LG_NGROUP - SC_LG_FIRST_REGULAR_BASE) \ | ||
| 238 | /* Last SC of the last group hits the bound exactly; exclude it. */ \ | ||
| 239 | - 1) | ||
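To sanity-check the SC_NBINS arithmetic, here is a standalone worked example assuming a typical x86-64 configuration (LG_PAGE == 12, LG_QUANTUM == 4, SC_LG_NGROUP == 2, SC_LG_TINY_MIN == 3); the EX_* constants are local stand-ins for illustration, not jemalloc definitions.

    #include <stdio.h>

    #define EX_LG_PAGE     12
    #define EX_LG_QUANTUM   4
    #define EX_LG_NGROUP    2
    #define EX_LG_TINY_MIN  3
    #define EX_NGROUP      (1 << EX_LG_NGROUP)

    int
    main(void) {
        int ntiny = EX_LG_QUANTUM - EX_LG_TINY_MIN;                /* 1 */
        int npseudo = EX_NGROUP;                                   /* 4 */
        int lg_first_regular_base = EX_LG_QUANTUM + EX_LG_NGROUP;  /* 6 */
        int nbins = ntiny + npseudo
            + EX_NGROUP * (EX_LG_PAGE + EX_LG_NGROUP - lg_first_regular_base)
            - 1;                                                   /* 36 */
        long small_maxclass = (1L << (EX_LG_PAGE + EX_LG_NGROUP - 1))
            + (EX_NGROUP - 1) * (1L << (EX_LG_PAGE - 1));          /* 14336 */
        printf("ntiny=%d npseudo=%d nbins=%d small_maxclass=%ld\n",
            ntiny, npseudo, nbins, small_maxclass);
        return 0;
    }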
| 240 | |||
| 241 | /* | ||
| 242 | * The size2index_tab lookup table uses uint8_t to encode each bin index, so we | ||
| 243 | * cannot support more than 256 small size classes. | ||
| 244 | */ | ||
| 245 | #if (SC_NBINS > 256) | ||
| 246 | # error "Too many small size classes" | ||
| 247 | #endif | ||
| 248 | |||
| 249 | /* The largest size class in the lookup table, and its binary log. */ | ||
| 250 | #define SC_LG_MAX_LOOKUP 12 | ||
| 251 | #define SC_LOOKUP_MAXCLASS (1 << SC_LG_MAX_LOOKUP) | ||
| 252 | |||
| 253 | /* Internal, only used for the definition of SC_SMALL_MAXCLASS. */ | ||
| 254 | #define SC_SMALL_MAX_BASE (1 << (LG_PAGE + SC_LG_NGROUP - 1)) | ||
| 255 | #define SC_SMALL_MAX_DELTA (1 << (LG_PAGE - 1)) | ||
| 256 | |||
| 257 | /* The largest size class allocated out of a slab. */ | ||
| 258 | #define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \ | ||
| 259 | + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA) | ||
| 260 | |||
| 261 | /* The fastpath assumes all lookup-able sizes are small. */ | ||
| 262 | #if (SC_SMALL_MAXCLASS < SC_LOOKUP_MAXCLASS) | ||
| 263 | # error "Lookup table sizes must be small" | ||
| 264 | #endif | ||
| 265 | |||
| 266 | /* The smallest size class not allocated out of a slab. */ | ||
| 267 | #define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP)) | ||
| 268 | #define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP) | ||
| 269 | |||
| 270 | /* Internal; only used for the definition of SC_LARGE_MAXCLASS. */ | ||
| 271 | #define SC_MAX_BASE ((size_t)1 << (SC_PTR_BITS - 2)) | ||
| 272 | #define SC_MAX_DELTA ((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP)) | ||
| 273 | |||
| 274 | /* The largest size class supported. */ | ||
| 275 | #define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA) | ||
| 276 | |||
| 277 | /* Maximum number of regions in one slab. */ | ||
| 278 | #ifndef CONFIG_LG_SLAB_MAXREGS | ||
| 279 | # define SC_LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN) | ||
| 280 | #else | ||
| 281 | # if CONFIG_LG_SLAB_MAXREGS < (LG_PAGE - SC_LG_TINY_MIN) | ||
| 282 | # error "Unsupported SC_LG_SLAB_MAXREGS" | ||
| 283 | # else | ||
| 284 | # define SC_LG_SLAB_MAXREGS CONFIG_LG_SLAB_MAXREGS | ||
| 285 | # endif | ||
| 286 | #endif | ||
| 287 | |||
| 288 | #define SC_SLAB_MAXREGS (1U << SC_LG_SLAB_MAXREGS) | ||
| 289 | |||
| 290 | typedef struct sc_s sc_t; | ||
| 291 | struct sc_s { | ||
| 292 | /* Size class index, or -1 if not a valid size class. */ | ||
| 293 | int index; | ||
| 294 | /* Lg group base size (no deltas added). */ | ||
| 295 | int lg_base; | ||
| 296 | /* Lg delta to previous size class. */ | ||
| 297 | int lg_delta; | ||
| 298 | /* Delta multiplier. size == 1<<lg_base + ndelta<<lg_delta */ | ||
| 299 | int ndelta; | ||
| 300 | /* | ||
| 301 | * True if the size class is a multiple of the page size, false | ||
| 302 | * otherwise. | ||
| 303 | */ | ||
| 304 | bool psz; | ||
| 305 | /* | ||
| 306 | * True if the size class is a small, bin, size class. False otherwise. | ||
| 307 | */ | ||
| 308 | bool bin; | ||
| 309 | /* The slab page count if a small bin size class, 0 otherwise. */ | ||
| 310 | int pgs; | ||
| 311 | /* Same as lg_delta if a lookup table size class, 0 otherwise. */ | ||
| 312 | int lg_delta_lookup; | ||
| 313 | }; | ||
| 314 | |||
| 315 | typedef struct sc_data_s sc_data_t; | ||
| 316 | struct sc_data_s { | ||
| 317 | /* Number of tiny size classes. */ | ||
| 318 | unsigned ntiny; | ||
| 319 | /* Number of bins supported by the lookup table. */ | ||
| 320 | int nlbins; | ||
| 321 | /* Number of small size class bins. */ | ||
| 322 | int nbins; | ||
| 323 | /* Number of size classes. */ | ||
| 324 | int nsizes; | ||
| 325 | /* Number of bits required to store NSIZES. */ | ||
| 326 | int lg_ceil_nsizes; | ||
| 327 | /* Number of size classes that are a multiple of (1U << LG_PAGE). */ | ||
| 328 | unsigned npsizes; | ||
| 329 | /* Lg of maximum tiny size class (or -1, if none). */ | ||
| 330 | int lg_tiny_maxclass; | ||
| 331 | /* Maximum size class included in lookup table. */ | ||
| 332 | size_t lookup_maxclass; | ||
| 333 | /* Maximum small size class. */ | ||
| 334 | size_t small_maxclass; | ||
| 335 | /* Lg of minimum large size class. */ | ||
| 336 | int lg_large_minclass; | ||
| 337 | /* The minimum large size class. */ | ||
| 338 | size_t large_minclass; | ||
| 339 | /* Maximum (large) size class. */ | ||
| 340 | size_t large_maxclass; | ||
| 341 | /* True if the sc_data_t has been initialized (for debugging only). */ | ||
| 342 | bool initialized; | ||
| 343 | |||
| 344 | sc_t sc[SC_NSIZES]; | ||
| 345 | }; | ||
| 346 | |||
| 347 | size_t reg_size_compute(int lg_base, int lg_delta, int ndelta); | ||
| 348 | void sc_data_init(sc_data_t *data); | ||
| 349 | /* | ||
| 350 | * Updates slab sizes in [begin, end] to be pgs pages in length, if possible. | ||
| 351 | * Otherwise, does its best to accommodate the request. | ||
| 352 | */ | ||
| 353 | void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, | ||
| 354 | int pgs); | ||
| 355 | void sc_boot(sc_data_t *data); | ||
| 356 | |||
| 357 | #endif /* JEMALLOC_INTERNAL_SC_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/sec.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/sec.h deleted file mode 100644 index fa86338..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/sec.h +++ /dev/null | |||
| @@ -1,120 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_SEC_H | ||
| 2 | #define JEMALLOC_INTERNAL_SEC_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/atomic.h" | ||
| 5 | #include "jemalloc/internal/pai.h" | ||
| 6 | |||
| 7 | /* | ||
| 8 | * Small extent cache. | ||
| 9 | * | ||
| 10 | * This includes some utilities to cache small extents. We have a per-pszind | ||
| 11 | * bin with its own list of extents of that size. We don't try to do any | ||
| 12 | * coalescing of extents (since it would in general require cross-shard locks or | ||
| 13 | * knowledge of the underlying PAI implementation). | ||
| 14 | */ | ||
| 15 | |||
| 16 | /* | ||
| 17 | * For now, this is just one field; eventually, we'll probably want to get more | ||
| 18 | * fine-grained data out (like per-size class statistics). | ||
| 19 | */ | ||
| 20 | typedef struct sec_stats_s sec_stats_t; | ||
| 21 | struct sec_stats_s { | ||
| 22 | /* Sum of bytes_cur across all shards. */ | ||
| 23 | size_t bytes; | ||
| 24 | }; | ||
| 25 | |||
| 26 | static inline void | ||
| 27 | sec_stats_accum(sec_stats_t *dst, sec_stats_t *src) { | ||
| 28 | dst->bytes += src->bytes; | ||
| 29 | } | ||
| 30 | |||
| 31 | /* A collection of free extents, all of the same size. */ | ||
| 32 | typedef struct sec_bin_s sec_bin_t; | ||
| 33 | struct sec_bin_s { | ||
| 34 | /* | ||
| 35 | * When we fail to fulfill an allocation, we do a batch-alloc on the | ||
| 36 | * underlying allocator to fill extra items, as well. We drop the SEC | ||
| 37 | * lock while doing so, to allow operations on other bins to succeed. | ||
| 38 | * That introduces the possibility of other threads also trying to | ||
| 39 | * allocate out of this bin, failing, and also going to the backing | ||
| 40 | * allocator. To avoid a thundering herd problem in which lots of | ||
| 41 | * threads do batch allocs and overfill this bin as a result, we only | ||
| 42 | * allow one batch allocation at a time for a bin. This bool tracks | ||
| 43 | * whether or not some thread is already batch allocating. | ||
| 44 | * | ||
| 45 | * Eventually, the right answer may be a smarter sharding policy for the | ||
| 46 | * bins (e.g. a mutex per bin, which would also be more scalable | ||
| 47 | * generally; the batch-allocating thread could hold it while | ||
| 48 | * batch-allocating). | ||
| 49 | */ | ||
| 50 | bool being_batch_filled; | ||
| 51 | |||
| 52 | /* | ||
| 53 | * Number of bytes in this particular bin (as opposed to the | ||
| 54 | * sec_shard_t's bytes_cur). This isn't user visible or reported in | ||
| 55 | * stats; rather, it allows us to quickly determine the change in the | ||
| 56 | * centralized counter when flushing. | ||
| 57 | */ | ||
| 58 | size_t bytes_cur; | ||
| 59 | edata_list_active_t freelist; | ||
| 60 | }; | ||
| 61 | |||
| 62 | typedef struct sec_shard_s sec_shard_t; | ||
| 63 | struct sec_shard_s { | ||
| 64 | /* | ||
| 65 | * We don't keep per-bin mutexes, even though that would allow more | ||
| 66 | * sharding; this allows global cache-eviction, which in turn allows for | ||
| 67 | * better balancing across free lists. | ||
| 68 | */ | ||
| 69 | malloc_mutex_t mtx; | ||
| 70 | /* | ||
| 71 | * A SEC may need to be shut down (i.e. flushed of its contents and | ||
| 72 | * prevented from further caching). To avoid tricky synchronization | ||
| 73 | * issues, we just track enabled-status in each shard, guarded by a | ||
| 74 | * mutex. In practice, this is only ever checked during brief races, | ||
| 75 | * since the arena-level atomic boolean tracking HPA enabled-ness means | ||
| 76 | * that we won't go down these pathways very often after custom extent | ||
| 77 | * hooks are installed. | ||
| 78 | */ | ||
| 79 | bool enabled; | ||
| 80 | sec_bin_t *bins; | ||
| 81 | /* Number of bytes in all bins in the shard. */ | ||
| 82 | size_t bytes_cur; | ||
| 83 | /* The next pszind to flush in the flush-some pathways. */ | ||
| 84 | pszind_t to_flush_next; | ||
| 85 | }; | ||
| 86 | |||
| 87 | typedef struct sec_s sec_t; | ||
| 88 | struct sec_s { | ||
| 89 | pai_t pai; | ||
| 90 | pai_t *fallback; | ||
| 91 | |||
| 92 | sec_opts_t opts; | ||
| 93 | sec_shard_t *shards; | ||
| 94 | pszind_t npsizes; | ||
| 95 | }; | ||
| 96 | |||
| 97 | bool sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback, | ||
| 98 | const sec_opts_t *opts); | ||
| 99 | void sec_flush(tsdn_t *tsdn, sec_t *sec); | ||
| 100 | void sec_disable(tsdn_t *tsdn, sec_t *sec); | ||
| 101 | |||
| 102 | /* | ||
| 103 | * Morally, these two stats methods probably ought to be a single one (and the | ||
| 104 | * mutex_prof_data ought to live in the sec_stats_t). But splitting them apart | ||
| 105 | * lets them fit easily into the pa_shard stats framework (which also has this | ||
| 106 | * split), which simplifies the stats management. | ||
| 107 | */ | ||
| 108 | void sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats); | ||
| 109 | void sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec, | ||
| 110 | mutex_prof_data_t *mutex_prof_data); | ||
| 111 | |||
| 112 | /* | ||
| 113 | * We use the arena lock ordering; these are acquired in phase 2 of forking, but | ||
| 114 | * should be acquired before the underlying allocator mutexes. | ||
| 115 | */ | ||
| 116 | void sec_prefork2(tsdn_t *tsdn, sec_t *sec); | ||
| 117 | void sec_postfork_parent(tsdn_t *tsdn, sec_t *sec); | ||
| 118 | void sec_postfork_child(tsdn_t *tsdn, sec_t *sec); | ||
| 119 | |||
| 120 | #endif /* JEMALLOC_INTERNAL_SEC_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/sec_opts.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/sec_opts.h deleted file mode 100644 index a3ad72f..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/sec_opts.h +++ /dev/null | |||
| @@ -1,59 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_SEC_OPTS_H | ||
| 2 | #define JEMALLOC_INTERNAL_SEC_OPTS_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * The configuration settings used by an sec_t. Morally, this is part of the | ||
| 6 | * SEC interface, but we put it here for header-ordering reasons. | ||
| 7 | */ | ||
| 8 | |||
| 9 | typedef struct sec_opts_s sec_opts_t; | ||
| 10 | struct sec_opts_s { | ||
| 11 | /* | ||
| 12 | * We don't necessarily always use all the shards; requests are | ||
| 13 | * distributed across shards [0, nshards - 1). | ||
| 14 | */ | ||
| 15 | size_t nshards; | ||
| 16 | /* | ||
| 17 | * We'll automatically refuse to cache any objects in this sec if | ||
| 18 | * they're larger than max_alloc bytes, instead forwarding such objects | ||
| 19 | * directly to the fallback. | ||
| 20 | */ | ||
| 21 | size_t max_alloc; | ||
| 22 | /* | ||
| 23 | * Exceeding this amount of cached extents in a shard causes us to start | ||
| 24 | * flushing bins in that shard until we fall below bytes_after_flush. | ||
| 25 | */ | ||
| 26 | size_t max_bytes; | ||
| 27 | /* | ||
| 28 | * The number of bytes (in all bins) we flush down to when we exceed | ||
| 29 | * max_bytes. We want this to be less than max_bytes, because | ||
| 30 | * otherwise we could get into situations where a shard undergoing | ||
| 31 | * net-deallocation keeps bytes_cur very near to max_bytes, so that | ||
| 32 | * most deallocations get immediately forwarded to the underlying PAI | ||
| 33 | * implementation, defeating the point of the SEC. | ||
| 34 | */ | ||
| 35 | size_t bytes_after_flush; | ||
| 36 | /* | ||
| 37 | * When we can't satisfy an allocation out of the SEC because there are | ||
| 38 | * no available ones cached, we allocate multiple extents of that size out of the | ||
| 39 | * the fallback allocator. Eventually we might want to do something | ||
| 40 | * cleverer, but for now we just grab a fixed number. | ||
| 41 | */ | ||
| 42 | size_t batch_fill_extra; | ||
| 43 | }; | ||
| 44 | |||
| 45 | #define SEC_OPTS_DEFAULT { \ | ||
| 46 | /* nshards */ \ | ||
| 47 | 4, \ | ||
| 48 | /* max_alloc */ \ | ||
| 49 | (32 * 1024) < PAGE ? PAGE : (32 * 1024), \ | ||
| 50 | /* max_bytes */ \ | ||
| 51 | 256 * 1024, \ | ||
| 52 | /* bytes_after_flush */ \ | ||
| 53 | 128 * 1024, \ | ||
| 54 | /* batch_fill_extra */ \ | ||
| 55 | 0 \ | ||
| 56 | } | ||
| 57 | |||
| 58 | |||
| 59 | #endif /* JEMALLOC_INTERNAL_SEC_OPTS_H */ | ||
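A hypothetical usage sketch, assuming the jemalloc internal headers (which define PAGE and sec_opts_t) are available: start from the defaults above and override a few fields before handing the options to sec_init() from sec.h. The function name sec_opts_example and the chosen values are made up for illustration.

    static void
    sec_opts_example(sec_opts_t *out) {
        sec_opts_t opts = SEC_OPTS_DEFAULT;
        opts.nshards = 8;                   /* more sharding for heavily threaded use */
        opts.max_bytes = 1024 * 1024;       /* cache up to 1 MiB per shard */
        opts.bytes_after_flush = 512 * 1024;
        *out = opts;
    }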
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/seq.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/seq.h deleted file mode 100644 index ef2df4c..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/seq.h +++ /dev/null | |||
| @@ -1,55 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_SEQ_H | ||
| 2 | #define JEMALLOC_INTERNAL_SEQ_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/atomic.h" | ||
| 5 | |||
| 6 | /* | ||
| 7 | * A simple seqlock implementation. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #define seq_define(type, short_type) \ | ||
| 11 | typedef struct { \ | ||
| 12 | atomic_zu_t seq; \ | ||
| 13 | atomic_zu_t data[ \ | ||
| 14 | (sizeof(type) + sizeof(size_t) - 1) / sizeof(size_t)]; \ | ||
| 15 | } seq_##short_type##_t; \ | ||
| 16 | \ | ||
| 17 | /* \ | ||
| 18 | * No internal synchronization -- the caller must ensure that there's \ | ||
| 19 | * only a single writer at a time. \ | ||
| 20 | */ \ | ||
| 21 | static inline void \ | ||
| 22 | seq_store_##short_type(seq_##short_type##_t *dst, type *src) { \ | ||
| 23 | size_t buf[sizeof(dst->data) / sizeof(size_t)]; \ | ||
| 24 | buf[sizeof(buf) / sizeof(size_t) - 1] = 0; \ | ||
| 25 | memcpy(buf, src, sizeof(type)); \ | ||
| 26 | size_t old_seq = atomic_load_zu(&dst->seq, ATOMIC_RELAXED); \ | ||
| 27 | atomic_store_zu(&dst->seq, old_seq + 1, ATOMIC_RELAXED); \ | ||
| 28 | atomic_fence(ATOMIC_RELEASE); \ | ||
| 29 | for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \ | ||
| 30 | atomic_store_zu(&dst->data[i], buf[i], ATOMIC_RELAXED); \ | ||
| 31 | } \ | ||
| 32 | atomic_store_zu(&dst->seq, old_seq + 2, ATOMIC_RELEASE); \ | ||
| 33 | } \ | ||
| 34 | \ | ||
| 35 | /* Returns whether or not the read was consistent. */ \ | ||
| 36 | static inline bool \ | ||
| 37 | seq_try_load_##short_type(type *dst, seq_##short_type##_t *src) { \ | ||
| 38 | size_t buf[sizeof(src->data) / sizeof(size_t)]; \ | ||
| 39 | size_t seq1 = atomic_load_zu(&src->seq, ATOMIC_ACQUIRE); \ | ||
| 40 | if (seq1 % 2 != 0) { \ | ||
| 41 | return false; \ | ||
| 42 | } \ | ||
| 43 | for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \ | ||
| 44 | buf[i] = atomic_load_zu(&src->data[i], ATOMIC_RELAXED); \ | ||
| 45 | } \ | ||
| 46 | atomic_fence(ATOMIC_ACQUIRE); \ | ||
| 47 | size_t seq2 = atomic_load_zu(&src->seq, ATOMIC_RELAXED); \ | ||
| 48 | if (seq1 != seq2) { \ | ||
| 49 | return false; \ | ||
| 50 | } \ | ||
| 51 | memcpy(dst, buf, sizeof(type)); \ | ||
| 52 | return true; \ | ||
| 53 | } | ||
| 54 | |||
| 55 | #endif /* JEMALLOC_INTERNAL_SEQ_H */ | ||
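A hypothetical usage sketch, assuming this header and jemalloc's atomic shims are available. seq_define(counters_t, counters) generates seq_counters_t, seq_store_counters(), and seq_try_load_counters(); the counters_t type and the two wrapper functions below are invented for illustration.

    #include <stdint.h>

    /* A small stats record protected by the seqlock. */
    typedef struct {
        uint64_t nmalloc;
        uint64_t ndalloc;
    } counters_t;

    seq_define(counters_t, counters)

    static seq_counters_t g_counters;

    /* Single writer: publish a consistent snapshot. */
    static void
    counters_publish(uint64_t nmalloc, uint64_t ndalloc) {
        counters_t c = {nmalloc, ndalloc};
        seq_store_counters(&g_counters, &c);
    }

    /* Readers: retry until the two sequence reads match. */
    static counters_t
    counters_snapshot(void) {
        counters_t c;
        while (!seq_try_load_counters(&c, &g_counters)) {
            /* A store was in progress; try again. */
        }
        return c;
    }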
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/slab_data.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/slab_data.h deleted file mode 100644 index e821863..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/slab_data.h +++ /dev/null | |||
| @@ -1,12 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_SLAB_DATA_H | ||
| 2 | #define JEMALLOC_INTERNAL_SLAB_DATA_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/bitmap.h" | ||
| 5 | |||
| 6 | typedef struct slab_data_s slab_data_t; | ||
| 7 | struct slab_data_s { | ||
| 8 | /* Per region allocated/deallocated bitmap. */ | ||
| 9 | bitmap_t bitmap[BITMAP_GROUPS_MAX]; | ||
| 10 | }; | ||
| 11 | |||
| 12 | #endif /* JEMALLOC_INTERNAL_SLAB_DATA_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/smoothstep.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/smoothstep.h deleted file mode 100644 index 2e14430..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/smoothstep.h +++ /dev/null | |||
| @@ -1,232 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H | ||
| 2 | #define JEMALLOC_INTERNAL_SMOOTHSTEP_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * This file was generated by the following command: | ||
| 6 | * sh smoothstep.sh smoother 200 24 3 15 | ||
| 7 | */ | ||
| 8 | /******************************************************************************/ | ||
| 9 | |||
| 10 | /* | ||
| 11 | * This header defines a precomputed table based on the smoothstep family of | ||
| 12 | * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0 | ||
| 13 | * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so | ||
| 14 | * that floating point math can be avoided. | ||
| 15 | * | ||
| 16 | * 3 2 | ||
| 17 | * smoothstep(x) = -2x + 3x | ||
| 18 | * | ||
| 19 | * 5 4 3 | ||
| 20 | * smootherstep(x) = 6x - 15x + 10x | ||
| 21 | * | ||
| 22 | * 7 6 5 4 | ||
| 23 | * smootheststep(x) = -20x + 70x - 84x + 35x | ||
| 24 | */ | ||
| 25 | |||
| 26 | #define SMOOTHSTEP_VARIANT "smoother" | ||
| 27 | #define SMOOTHSTEP_NSTEPS 200 | ||
| 28 | #define SMOOTHSTEP_BFP 24 | ||
| 29 | #define SMOOTHSTEP \ | ||
| 30 | /* STEP(step, h, x, y) */ \ | ||
| 31 | STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \ | ||
| 32 | STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \ | ||
| 33 | STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \ | ||
| 34 | STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \ | ||
| 35 | STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \ | ||
| 36 | STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \ | ||
| 37 | STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \ | ||
| 38 | STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \ | ||
| 39 | STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \ | ||
| 40 | STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \ | ||
| 41 | STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \ | ||
| 42 | STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \ | ||
| 43 | STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \ | ||
| 44 | STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \ | ||
| 45 | STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \ | ||
| 46 | STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \ | ||
| 47 | STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \ | ||
| 48 | STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \ | ||
| 49 | STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \ | ||
| 50 | STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \ | ||
| 51 | STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \ | ||
| 52 | STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \ | ||
| 53 | STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \ | ||
| 54 | STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \ | ||
| 55 | STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \ | ||
| 56 | STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \ | ||
| 57 | STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \ | ||
| 58 | STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \ | ||
| 59 | STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \ | ||
| 60 | STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \ | ||
| 61 | STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \ | ||
| 62 | STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \ | ||
| 63 | STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \ | ||
| 64 | STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \ | ||
| 65 | STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \ | ||
| 66 | STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \ | ||
| 67 | STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \ | ||
| 68 | STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \ | ||
| 69 | STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \ | ||
| 70 | STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \ | ||
| 71 | STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \ | ||
| 72 | STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \ | ||
| 73 | STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \ | ||
| 74 | STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \ | ||
| 75 | STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \ | ||
| 76 | STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \ | ||
| 77 | STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \ | ||
| 78 | STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \ | ||
| 79 | STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \ | ||
| 80 | STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \ | ||
| 81 | STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \ | ||
| 82 | STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \ | ||
| 83 | STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \ | ||
| 84 | STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \ | ||
| 85 | STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \ | ||
| 86 | STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \ | ||
| 87 | STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \ | ||
| 88 | STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \ | ||
| 89 | STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \ | ||
| 90 | STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \ | ||
| 91 | STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \ | ||
| 92 | STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \ | ||
| 93 | STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \ | ||
| 94 | STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \ | ||
| 95 | STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \ | ||
| 96 | STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \ | ||
| 97 | STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \ | ||
| 98 | STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \ | ||
| 99 | STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \ | ||
| 100 | STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \ | ||
| 101 | STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \ | ||
| 102 | STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \ | ||
| 103 | STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \ | ||
| 104 | STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \ | ||
| 105 | STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \ | ||
| 106 | STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \ | ||
| 107 | STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \ | ||
| 108 | STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \ | ||
| 109 | STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \ | ||
| 110 | STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \ | ||
| 111 | STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \ | ||
| 112 | STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \ | ||
| 113 | STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \ | ||
| 114 | STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \ | ||
| 115 | STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \ | ||
| 116 | STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \ | ||
| 117 | STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \ | ||
| 118 | STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \ | ||
| 119 | STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \ | ||
| 120 | STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \ | ||
| 121 | STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \ | ||
| 122 | STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \ | ||
| 123 | STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \ | ||
| 124 | STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \ | ||
| 125 | STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \ | ||
| 126 | STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \ | ||
| 127 | STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \ | ||
| 128 | STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \ | ||
| 129 | STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \ | ||
| 130 | STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \ | ||
| 131 | STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \ | ||
| 132 | STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \ | ||
| 133 | STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \ | ||
| 134 | STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \ | ||
| 135 | STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \ | ||
| 136 | STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \ | ||
| 137 | STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \ | ||
| 138 | STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \ | ||
| 139 | STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \ | ||
| 140 | STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \ | ||
| 141 | STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \ | ||
| 142 | STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \ | ||
| 143 | STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \ | ||
| 144 | STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \ | ||
| 145 | STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \ | ||
| 146 | STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \ | ||
| 147 | STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \ | ||
| 148 | STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \ | ||
| 149 | STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \ | ||
| 150 | STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \ | ||
| 151 | STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \ | ||
| 152 | STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \ | ||
| 153 | STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \ | ||
| 154 | STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \ | ||
| 155 | STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \ | ||
| 156 | STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \ | ||
| 157 | STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \ | ||
| 158 | STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \ | ||
| 159 | STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \ | ||
| 160 | STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \ | ||
| 161 | STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \ | ||
| 162 | STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \ | ||
| 163 | STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \ | ||
| 164 | STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \ | ||
| 165 | STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \ | ||
| 166 | STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \ | ||
| 167 | STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \ | ||
| 168 | STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \ | ||
| 169 | STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \ | ||
| 170 | STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \ | ||
| 171 | STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \ | ||
| 172 | STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \ | ||
| 173 | STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \ | ||
| 174 | STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \ | ||
| 175 | STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \ | ||
| 176 | STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \ | ||
| 177 | STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \ | ||
| 178 | STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \ | ||
| 179 | STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \ | ||
| 180 | STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \ | ||
| 181 | STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \ | ||
| 182 | STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \ | ||
| 183 | STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \ | ||
| 184 | STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \ | ||
| 185 | STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \ | ||
| 186 | STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \ | ||
| 187 | STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \ | ||
| 188 | STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \ | ||
| 189 | STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \ | ||
| 190 | STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \ | ||
| 191 | STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \ | ||
| 192 | STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \ | ||
| 193 | STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \ | ||
| 194 | STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \ | ||
| 195 | STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \ | ||
| 196 | STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \ | ||
| 197 | STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \ | ||
| 198 | STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \ | ||
| 199 | STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \ | ||
| 200 | STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \ | ||
| 201 | STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \ | ||
| 202 | STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \ | ||
| 203 | STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \ | ||
| 204 | STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \ | ||
| 205 | STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \ | ||
| 206 | STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \ | ||
| 207 | STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \ | ||
| 208 | STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \ | ||
| 209 | STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \ | ||
| 210 | STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \ | ||
| 211 | STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \ | ||
| 212 | STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \ | ||
| 213 | STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \ | ||
| 214 | STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \ | ||
| 215 | STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \ | ||
| 216 | STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \ | ||
| 217 | STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \ | ||
| 218 | STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \ | ||
| 219 | STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \ | ||
| 220 | STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \ | ||
| 221 | STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \ | ||
| 222 | STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \ | ||
| 223 | STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \ | ||
| 224 | STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \ | ||
| 225 | STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \ | ||
| 226 | STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \ | ||
| 227 | STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \ | ||
| 228 | STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \ | ||
| 229 | STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \ | ||
| 230 | STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \ | ||
| 231 | |||
| 232 | #endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */ | ||
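An illustrative sketch of how the table is consumed, assuming this header is on the include path (jemalloc's decay code builds its own table in essentially this way): define STEP to keep only the fixed-point h column, expand SMOOTHSTEP into an array, then decode an entry by dividing by 2^SMOOTHSTEP_BFP.

    #include <stdint.h>
    #include <stdio.h>

    #include "jemalloc/internal/smoothstep.h"

    /* Keep only the h column of each STEP() entry. */
    static const uint64_t smoothstep_tab[SMOOTHSTEP_NSTEPS] = {
    #define STEP(step, h, x, y) h,
        SMOOTHSTEP
    #undef STEP
    };

    int
    main(void) {
        /* Entry 100 corresponds to x = 0.5; the decoded value should be 0.5. */
        double y = (double)smoothstep_tab[100 - 1]
            / (double)((uint64_t)1 << SMOOTHSTEP_BFP);
        printf("smootherstep(0.5) ~= %f\n", y);
        return 0;
    }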
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/smoothstep.sh b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/smoothstep.sh deleted file mode 100755 index 65de97b..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/smoothstep.sh +++ /dev/null | |||
| @@ -1,101 +0,0 @@ | |||
| 1 | #!/bin/sh | ||
| 2 | # | ||
| 3 | # Generate a discrete lookup table for a sigmoid function in the smoothstep | ||
| 4 | # family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table | ||
| 5 | # entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode | ||
| 6 | # the entries using a binary fixed point representation. | ||
| 7 | # | ||
| 8 | # Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec> | ||
| 9 | # | ||
| 10 | # <variant> is in {smooth, smoother, smoothest}. | ||
| 11 | # <nsteps> must be greater than zero. | ||
| 12 | # <bfp> must be in [0..62]; reasonable values are roughly [10..30]. | ||
| 13 | # <xprec> is x decimal precision. | ||
| 14 | # <yprec> is y decimal precision. | ||
| 15 | |||
| 16 | #set -x | ||
| 17 | |||
| 18 | cmd="sh smoothstep.sh $*" | ||
| 19 | variant=$1 | ||
| 20 | nsteps=$2 | ||
| 21 | bfp=$3 | ||
| 22 | xprec=$4 | ||
| 23 | yprec=$5 | ||
| 24 | |||
| 25 | case "${variant}" in | ||
| 26 | smooth) | ||
| 27 | ;; | ||
| 28 | smoother) | ||
| 29 | ;; | ||
| 30 | smoothest) | ||
| 31 | ;; | ||
| 32 | *) | ||
| 33 | echo "Unsupported variant" | ||
| 34 | exit 1 | ||
| 35 | ;; | ||
| 36 | esac | ||
| 37 | |||
| 38 | smooth() { | ||
| 39 | step=$1 | ||
| 40 | y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` | ||
| 41 | h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` | ||
| 42 | } | ||
| 43 | |||
| 44 | smoother() { | ||
| 45 | step=$1 | ||
| 46 | y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` | ||
| 47 | h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` | ||
| 48 | } | ||
| 49 | |||
| 50 | smoothest() { | ||
| 51 | step=$1 | ||
| 52 | y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` | ||
| 53 | h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` | ||
| 54 | } | ||
| 55 | |||
| 56 | cat <<EOF | ||
| 57 | #ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H | ||
| 58 | #define JEMALLOC_INTERNAL_SMOOTHSTEP_H | ||
| 59 | |||
| 60 | /* | ||
| 61 | * This file was generated by the following command: | ||
| 62 | * $cmd | ||
| 63 | */ | ||
| 64 | /******************************************************************************/ | ||
| 65 | |||
| 66 | /* | ||
| 67 | * This header defines a precomputed table based on the smoothstep family of | ||
| 68 | * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0 | ||
| 69 | * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so | ||
| 70 | * that floating point math can be avoided. | ||
| 71 | * | ||
| 72 | * 3 2 | ||
| 73 | * smoothstep(x) = -2x + 3x | ||
| 74 | * | ||
| 75 | * 5 4 3 | ||
| 76 | * smootherstep(x) = 6x - 15x + 10x | ||
| 77 | * | ||
| 78 | * 7 6 5 4 | ||
| 79 | * smootheststep(x) = -20x + 70x - 84x + 35x | ||
| 80 | */ | ||
| 81 | |||
| 82 | #define SMOOTHSTEP_VARIANT "${variant}" | ||
| 83 | #define SMOOTHSTEP_NSTEPS ${nsteps} | ||
| 84 | #define SMOOTHSTEP_BFP ${bfp} | ||
| 85 | #define SMOOTHSTEP \\ | ||
| 86 | /* STEP(step, h, x, y) */ \\ | ||
| 87 | EOF | ||
| 88 | |||
| 89 | s=1 | ||
| 90 | while [ $s -le $nsteps ] ; do | ||
| 91 | $variant ${s} | ||
| 92 | x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` | ||
| 93 | printf ' STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y} | ||
| 94 | |||
| 95 | s=$((s+1)) | ||
| 96 | done | ||
| 97 | echo | ||
| 98 | |||
| 99 | cat <<EOF | ||
| 100 | #endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */ | ||
| 101 | EOF | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/spin.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/spin.h deleted file mode 100644 index 22804c6..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/spin.h +++ /dev/null | |||
| @@ -1,40 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_SPIN_H | ||
| 2 | #define JEMALLOC_INTERNAL_SPIN_H | ||
| 3 | |||
| 4 | #define SPIN_INITIALIZER {0U} | ||
| 5 | |||
| 6 | typedef struct { | ||
| 7 | unsigned iteration; | ||
| 8 | } spin_t; | ||
| 9 | |||
| 10 | static inline void | ||
| 11 | spin_cpu_spinwait() { | ||
| 12 | # if HAVE_CPU_SPINWAIT | ||
| 13 | CPU_SPINWAIT; | ||
| 14 | # else | ||
| 15 | volatile int x = 0; | ||
| 16 | x = x; | ||
| 17 | # endif | ||
| 18 | } | ||
| 19 | |||
| 20 | static inline void | ||
| 21 | spin_adaptive(spin_t *spin) { | ||
| 22 | volatile uint32_t i; | ||
| 23 | |||
| 24 | if (spin->iteration < 5) { | ||
| 25 | for (i = 0; i < (1U << spin->iteration); i++) { | ||
| 26 | spin_cpu_spinwait(); | ||
| 27 | } | ||
| 28 | spin->iteration++; | ||
| 29 | } else { | ||
| 30 | #ifdef _WIN32 | ||
| 31 | SwitchToThread(); | ||
| 32 | #else | ||
| 33 | sched_yield(); | ||
| 34 | #endif | ||
| 35 | } | ||
| 36 | } | ||
| 37 | |||
| 38 | #undef SPIN_INLINE | ||
| 39 | |||
| 40 | #endif /* JEMALLOC_INTERNAL_SPIN_H */ | ||
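A hypothetical usage sketch, assuming this header is available: back off adaptively while waiting for a flag set by another thread. The first few iterations busy-spin, doubling the spin count each time; after that, spin_adaptive() yields to the scheduler.

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic bool ready;

    static void
    wait_until_ready(void) {
        spin_t spinner = SPIN_INITIALIZER;
        while (!atomic_load_explicit(&ready, memory_order_acquire)) {
            spin_adaptive(&spinner);
        }
    }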
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/stats.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/stats.h deleted file mode 100644 index 727f7dc..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/stats.h +++ /dev/null | |||
| @@ -1,54 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_STATS_H | ||
| 2 | #define JEMALLOC_INTERNAL_STATS_H | ||
| 3 | |||
| 4 | /* OPTION(opt, var_name, default, set_value_to) */ | ||
| 5 | #define STATS_PRINT_OPTIONS \ | ||
| 6 | OPTION('J', json, false, true) \ | ||
| 7 | OPTION('g', general, true, false) \ | ||
| 8 | OPTION('m', merged, config_stats, false) \ | ||
| 9 | OPTION('d', destroyed, config_stats, false) \ | ||
| 10 | OPTION('a', unmerged, config_stats, false) \ | ||
| 11 | OPTION('b', bins, true, false) \ | ||
| 12 | OPTION('l', large, true, false) \ | ||
| 13 | OPTION('x', mutex, true, false) \ | ||
| 14 | OPTION('e', extents, true, false) \ | ||
| 15 | OPTION('h', hpa, config_stats, false) | ||
| 16 | |||
| 17 | enum { | ||
| 18 | #define OPTION(o, v, d, s) stats_print_option_num_##v, | ||
| 19 | STATS_PRINT_OPTIONS | ||
| 20 | #undef OPTION | ||
| 21 | stats_print_tot_num_options | ||
| 22 | }; | ||
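An illustrative sketch of how an opts string such as "Jbl" can be parsed with the X-macro above, assuming the jemalloc internal headers (which define config_stats) are available; the function name stats_opts_parse_example is made up. The macro expands once into local bools carrying the defaults, and once into the switch that applies each option character.

    #include <stdbool.h>

    static void
    stats_opts_parse_example(const char *opts) {
    #define OPTION(o, v, d, s) bool v = d;
        STATS_PRINT_OPTIONS
    #undef OPTION
        for (const char *p = opts; p != NULL && *p != '\0'; p++) {
            switch (*p) {
    #define OPTION(o, v, d, s) case o: v = s; break;
            STATS_PRINT_OPTIONS
    #undef OPTION
            default:
                break;
            }
        }
        /* json, general, merged, ... now reflect the requested output flags. */
        (void)json; (void)general; (void)merged; (void)destroyed; (void)unmerged;
        (void)bins; (void)large; (void)mutex; (void)extents; (void)hpa;
    }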
| 23 | |||
| 24 | /* Options for stats_print. */ | ||
| 25 | extern bool opt_stats_print; | ||
| 26 | extern char opt_stats_print_opts[stats_print_tot_num_options+1]; | ||
| 27 | |||
| 28 | /* Utilities for stats_interval. */ | ||
| 29 | extern int64_t opt_stats_interval; | ||
| 30 | extern char opt_stats_interval_opts[stats_print_tot_num_options+1]; | ||
| 31 | |||
| 32 | #define STATS_INTERVAL_DEFAULT -1 | ||
| 33 | /* | ||
| 34 | * Batch-increment the counter to reduce synchronization overhead. Each thread | ||
| 35 | * merges after (interval >> LG_BATCH_SIZE) bytes of allocations; the batch is | ||
| 36 | * also capped at BATCH_MAX to keep accuracy when the interval is huge (which is expected). | ||
| 37 | */ | ||
| 38 | #define STATS_INTERVAL_ACCUM_LG_BATCH_SIZE 6 | ||
| 39 | #define STATS_INTERVAL_ACCUM_BATCH_MAX (4 << 20) | ||
| 40 | |||
| 41 | /* Only accessed by thread event. */ | ||
| 42 | uint64_t stats_interval_new_event_wait(tsd_t *tsd); | ||
| 43 | uint64_t stats_interval_postponed_event_wait(tsd_t *tsd); | ||
| 44 | void stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed); | ||
| 45 | |||
| 46 | /* Implements je_malloc_stats_print. */ | ||
| 47 | void stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts); | ||
| 48 | |||
| 49 | bool stats_boot(void); | ||
| 50 | void stats_prefork(tsdn_t *tsdn); | ||
| 51 | void stats_postfork_parent(tsdn_t *tsdn); | ||
| 52 | void stats_postfork_child(tsdn_t *tsdn); | ||
| 53 | |||
| 54 | #endif /* JEMALLOC_INTERNAL_STATS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/sz.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/sz.h deleted file mode 100644 index 3c0fc1d..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/sz.h +++ /dev/null | |||
| @@ -1,371 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_SIZE_H | ||
| 2 | #define JEMALLOC_INTERNAL_SIZE_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/bit_util.h" | ||
| 5 | #include "jemalloc/internal/pages.h" | ||
| 6 | #include "jemalloc/internal/sc.h" | ||
| 7 | #include "jemalloc/internal/util.h" | ||
| 8 | |||
| 9 | /* | ||
| 10 | * sz module: Size computations. | ||
| 11 | * | ||
| 12 | * Some abbreviations used here: | ||
| 13 | * p: Page | ||
| 14 | * ind: Index | ||
| 15 | * s, sz: Size | ||
| 16 | * u: Usable size | ||
| 17 | * a: Aligned | ||
| 18 | * | ||
| 19 | * These are not always used completely consistently, but should be enough to | ||
| 20 | * interpret function names. E.g. sz_psz2ind converts page size to page size | ||
| 21 | * index; sz_sa2u converts a (size, alignment) allocation request to the usable | ||
| 22 | * size that would result from such an allocation. | ||
| 23 | */ | ||
| 24 | |||
| 25 | /* Page size index type. */ | ||
| 26 | typedef unsigned pszind_t; | ||
| 27 | |||
| 28 | /* Size class index type. */ | ||
| 29 | typedef unsigned szind_t; | ||
| 30 | |||
| 31 | /* | ||
| 32 | * sz_pind2sz_tab encodes the same information as could be computed by | ||
| 33 | * sz_pind2sz_compute(). | ||
| 34 | */ | ||
| 35 | extern size_t sz_pind2sz_tab[SC_NPSIZES + 1]; | ||
| 36 | /* | ||
| 37 | * sz_index2size_tab encodes the same information as could be computed (at | ||
| 38 | * unacceptable cost in some code paths) by sz_index2size_compute(). | ||
| 39 | */ | ||
| 40 | extern size_t sz_index2size_tab[SC_NSIZES]; | ||
| 41 | /* | ||
| 42 | * sz_size2index_tab is a compact lookup table that rounds request sizes up to | ||
| 43 | * size classes. In order to reduce cache footprint, the table is compressed, | ||
| 44 | * and all accesses are via sz_size2index(). | ||
| 45 | */ | ||
| 46 | extern uint8_t sz_size2index_tab[]; | ||
| 47 | |||
| 48 | /* | ||
| 49 | * Padding for large allocations: PAGE when opt_cache_oblivious == true (to | ||
| 50 | * enable cache index randomization); 0 otherwise. | ||
| 51 | */ | ||
| 52 | extern size_t sz_large_pad; | ||
| 53 | |||
| 54 | extern void sz_boot(const sc_data_t *sc_data, bool cache_oblivious); | ||
| 55 | |||
| 56 | JEMALLOC_ALWAYS_INLINE pszind_t | ||
| 57 | sz_psz2ind(size_t psz) { | ||
| 58 | assert(psz > 0); | ||
| 59 | if (unlikely(psz > SC_LARGE_MAXCLASS)) { | ||
| 60 | return SC_NPSIZES; | ||
| 61 | } | ||
| 62 | /* x is the lg of the first base >= psz. */ | ||
| 63 | pszind_t x = lg_ceil(psz); | ||
| 64 | /* | ||
| 65 | * sc.h introduces a lot of size classes, divided into size class groups. | ||
| 66 | * There is one special group in which (and after which) every size class | ||
| 67 | * is an integer multiple of PAGE. We call it first_ps_rg, short for the | ||
| 68 | * "first page-size regular group". The range of first_ps_rg is | ||
| 69 | * (base, base * 2], with base == PAGE * SC_NGROUP. off_to_first_ps_rg | ||
| 70 | * starts at 1 rather than 0; e.g. off_to_first_ps_rg is 1 when psz is | ||
| 71 | * (PAGE * SC_NGROUP + 1). | ||
| 72 | */ | ||
| 73 | pszind_t off_to_first_ps_rg = (x < SC_LG_NGROUP + LG_PAGE) ? | ||
| 74 | 0 : x - (SC_LG_NGROUP + LG_PAGE); | ||
| 75 | |||
| 76 | /* | ||
| 77 | * Same as sc_s::lg_delta. | ||
| 78 | * Delta for off_to_first_ps_rg == 1 is PAGE, | ||
| 79 | * for each increase in offset, it's multiplied by two. | ||
| 80 | * Therefore, lg_delta = LG_PAGE + (off_to_first_ps_rg - 1). | ||
| 81 | */ | ||
| 82 | pszind_t lg_delta = (off_to_first_ps_rg == 0) ? | ||
| 83 | LG_PAGE : LG_PAGE + (off_to_first_ps_rg - 1); | ||
| 84 | |||
| 85 | /* | ||
| 86 | * Let's write psz in binary, e.g. 0011 for 0x3, 0111 for 0x7. | ||
| 87 | * The leftmost bits whose len is lg_base decide the base of psz. | ||
| 88 | * The rightmost bits whose len is lg_delta decide (psz % PAGE). | ||
| 89 | * The middle bits whose len is SC_LG_NGROUP decide ndelta. | ||
| 90 | * ndelta is offset to the first size class in the size class group, | ||
| 91 | * starts from 1. | ||
| 92 | * If you don't know lg_base, ndelta or lg_delta, see sc.h. | ||
| 93 | * |xxxxxxxxxxxxxxxxxxxx|------------------------|yyyyyyyyyyyyyyyyyyyyy| | ||
| 94 | * |<-- len: lg_base -->|<-- len: SC_LG_NGROUP-->|<-- len: lg_delta -->| | ||
| 95 | * |<-- ndelta -->| | ||
| 96 | * rg_inner_off = ndelta - 1 | ||
| 97 | * Why use (psz - 1)? | ||
| 98 | * To handle case: psz % (1 << lg_delta) == 0. | ||
| 99 | */ | ||
| 100 | pszind_t rg_inner_off = (((psz - 1)) >> lg_delta) & (SC_NGROUP - 1); | ||
| 101 | |||
| 102 | pszind_t base_ind = off_to_first_ps_rg << SC_LG_NGROUP; | ||
| 103 | pszind_t ind = base_ind + rg_inner_off; | ||
| 104 | return ind; | ||
| 105 | } | ||
| 106 | |||
| 107 | static inline size_t | ||
| 108 | sz_pind2sz_compute(pszind_t pind) { | ||
| 109 | if (unlikely(pind == SC_NPSIZES)) { | ||
| 110 | return SC_LARGE_MAXCLASS + PAGE; | ||
| 111 | } | ||
| 112 | size_t grp = pind >> SC_LG_NGROUP; | ||
| 113 | size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1); | ||
| 114 | |||
| 115 | size_t grp_size_mask = ~((!!grp)-1); | ||
| 116 | size_t grp_size = ((ZU(1) << (LG_PAGE + (SC_LG_NGROUP-1))) << grp) | ||
| 117 | & grp_size_mask; | ||
| 118 | |||
| 119 | size_t shift = (grp == 0) ? 1 : grp; | ||
| 120 | size_t lg_delta = shift + (LG_PAGE-1); | ||
| 121 | size_t mod_size = (mod+1) << lg_delta; | ||
| 122 | |||
| 123 | size_t sz = grp_size + mod_size; | ||
| 124 | return sz; | ||
| 125 | } | ||
| 126 | |||
| 127 | static inline size_t | ||
| 128 | sz_pind2sz_lookup(pszind_t pind) { | ||
| 129 | size_t ret = (size_t)sz_pind2sz_tab[pind]; | ||
| 130 | assert(ret == sz_pind2sz_compute(pind)); | ||
| 131 | return ret; | ||
| 132 | } | ||
| 133 | |||
| 134 | static inline size_t | ||
| 135 | sz_pind2sz(pszind_t pind) { | ||
| 136 | assert(pind < SC_NPSIZES + 1); | ||
| 137 | return sz_pind2sz_lookup(pind); | ||
| 138 | } | ||
| 139 | |||
| 140 | static inline size_t | ||
| 141 | sz_psz2u(size_t psz) { | ||
| 142 | if (unlikely(psz > SC_LARGE_MAXCLASS)) { | ||
| 143 | return SC_LARGE_MAXCLASS + PAGE; | ||
| 144 | } | ||
| 145 | size_t x = lg_floor((psz<<1)-1); | ||
| 146 | size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ? | ||
| 147 | LG_PAGE : x - SC_LG_NGROUP - 1; | ||
| 148 | size_t delta = ZU(1) << lg_delta; | ||
| 149 | size_t delta_mask = delta - 1; | ||
| 150 | size_t usize = (psz + delta_mask) & ~delta_mask; | ||
| 151 | return usize; | ||
| 152 | } | ||
| 153 | |||
| 154 | static inline szind_t | ||
| 155 | sz_size2index_compute(size_t size) { | ||
| 156 | if (unlikely(size > SC_LARGE_MAXCLASS)) { | ||
| 157 | return SC_NSIZES; | ||
| 158 | } | ||
| 159 | |||
| 160 | if (size == 0) { | ||
| 161 | return 0; | ||
| 162 | } | ||
| 163 | #if (SC_NTINY != 0) | ||
| 164 | if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) { | ||
| 165 | szind_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1; | ||
| 166 | szind_t lg_ceil = lg_floor(pow2_ceil_zu(size)); | ||
| 167 | return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin); | ||
| 168 | } | ||
| 169 | #endif | ||
| 170 | { | ||
| 171 | szind_t x = lg_floor((size<<1)-1); | ||
| 172 | szind_t shift = (x < SC_LG_NGROUP + LG_QUANTUM) ? 0 : | ||
| 173 | x - (SC_LG_NGROUP + LG_QUANTUM); | ||
| 174 | szind_t grp = shift << SC_LG_NGROUP; | ||
| 175 | |||
| 176 | szind_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1) | ||
| 177 | ? LG_QUANTUM : x - SC_LG_NGROUP - 1; | ||
| 178 | |||
| 179 | size_t delta_inverse_mask = ZU(-1) << lg_delta; | ||
| 180 | szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & | ||
| 181 | ((ZU(1) << SC_LG_NGROUP) - 1); | ||
| 182 | |||
| 183 | szind_t index = SC_NTINY + grp + mod; | ||
| 184 | return index; | ||
| 185 | } | ||
| 186 | } | ||
| 187 | |||
| 188 | JEMALLOC_ALWAYS_INLINE szind_t | ||
| 189 | sz_size2index_lookup_impl(size_t size) { | ||
| 190 | assert(size <= SC_LOOKUP_MAXCLASS); | ||
| 191 | return sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1) | ||
| 192 | >> SC_LG_TINY_MIN]; | ||
| 193 | } | ||
| 194 | |||
| 195 | JEMALLOC_ALWAYS_INLINE szind_t | ||
| 196 | sz_size2index_lookup(size_t size) { | ||
| 197 | szind_t ret = sz_size2index_lookup_impl(size); | ||
| 198 | assert(ret == sz_size2index_compute(size)); | ||
| 199 | return ret; | ||
| 200 | } | ||
| 201 | |||
| 202 | JEMALLOC_ALWAYS_INLINE szind_t | ||
| 203 | sz_size2index(size_t size) { | ||
| 204 | if (likely(size <= SC_LOOKUP_MAXCLASS)) { | ||
| 205 | return sz_size2index_lookup(size); | ||
| 206 | } | ||
| 207 | return sz_size2index_compute(size); | ||
| 208 | } | ||
| 209 | |||
| 210 | static inline size_t | ||
| 211 | sz_index2size_compute(szind_t index) { | ||
| 212 | #if (SC_NTINY > 0) | ||
| 213 | if (index < SC_NTINY) { | ||
| 214 | return (ZU(1) << (SC_LG_TINY_MAXCLASS - SC_NTINY + 1 + index)); | ||
| 215 | } | ||
| 216 | #endif | ||
| 217 | { | ||
| 218 | size_t reduced_index = index - SC_NTINY; | ||
| 219 | size_t grp = reduced_index >> SC_LG_NGROUP; | ||
| 220 | size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) - | ||
| 221 | 1); | ||
| 222 | |||
| 223 | size_t grp_size_mask = ~((!!grp)-1); | ||
| 224 | size_t grp_size = ((ZU(1) << (LG_QUANTUM + | ||
| 225 | (SC_LG_NGROUP-1))) << grp) & grp_size_mask; | ||
| 226 | |||
| 227 | size_t shift = (grp == 0) ? 1 : grp; | ||
| 228 | size_t lg_delta = shift + (LG_QUANTUM-1); | ||
| 229 | size_t mod_size = (mod+1) << lg_delta; | ||
| 230 | |||
| 231 | size_t usize = grp_size + mod_size; | ||
| 232 | return usize; | ||
| 233 | } | ||
| 234 | } | ||
| 235 | |||
| 236 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 237 | sz_index2size_lookup_impl(szind_t index) { | ||
| 238 | return sz_index2size_tab[index]; | ||
| 239 | } | ||
| 240 | |||
| 241 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 242 | sz_index2size_lookup(szind_t index) { | ||
| 243 | size_t ret = sz_index2size_lookup_impl(index); | ||
| 244 | assert(ret == sz_index2size_compute(index)); | ||
| 245 | return ret; | ||
| 246 | } | ||
| 247 | |||
| 248 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 249 | sz_index2size(szind_t index) { | ||
| 250 | assert(index < SC_NSIZES); | ||
| 251 | return sz_index2size_lookup(index); | ||
| 252 | } | ||
| 253 | |||
| 254 | JEMALLOC_ALWAYS_INLINE void | ||
| 255 | sz_size2index_usize_fastpath(size_t size, szind_t *ind, size_t *usize) { | ||
| 256 | *ind = sz_size2index_lookup_impl(size); | ||
| 257 | *usize = sz_index2size_lookup_impl(*ind); | ||
| 258 | } | ||
| 259 | |||
| 260 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 261 | sz_s2u_compute(size_t size) { | ||
| 262 | if (unlikely(size > SC_LARGE_MAXCLASS)) { | ||
| 263 | return 0; | ||
| 264 | } | ||
| 265 | |||
| 266 | if (size == 0) { | ||
| 267 | size++; | ||
| 268 | } | ||
| 269 | #if (SC_NTINY > 0) | ||
| 270 | if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) { | ||
| 271 | size_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1; | ||
| 272 | size_t lg_ceil = lg_floor(pow2_ceil_zu(size)); | ||
| 273 | return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : | ||
| 274 | (ZU(1) << lg_ceil)); | ||
| 275 | } | ||
| 276 | #endif | ||
| 277 | { | ||
| 278 | size_t x = lg_floor((size<<1)-1); | ||
| 279 | size_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1) | ||
| 280 | ? LG_QUANTUM : x - SC_LG_NGROUP - 1; | ||
| 281 | size_t delta = ZU(1) << lg_delta; | ||
| 282 | size_t delta_mask = delta - 1; | ||
| 283 | size_t usize = (size + delta_mask) & ~delta_mask; | ||
| 284 | return usize; | ||
| 285 | } | ||
| 286 | } | ||
| 287 | |||
| 288 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 289 | sz_s2u_lookup(size_t size) { | ||
| 290 | size_t ret = sz_index2size_lookup(sz_size2index_lookup(size)); | ||
| 291 | |||
| 292 | assert(ret == sz_s2u_compute(size)); | ||
| 293 | return ret; | ||
| 294 | } | ||
| 295 | |||
| 296 | /* | ||
| 297 | * Compute usable size that would result from allocating an object with the | ||
| 298 | * specified size. | ||
| 299 | */ | ||
| 300 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 301 | sz_s2u(size_t size) { | ||
| 302 | if (likely(size <= SC_LOOKUP_MAXCLASS)) { | ||
| 303 | return sz_s2u_lookup(size); | ||
| 304 | } | ||
| 305 | return sz_s2u_compute(size); | ||
| 306 | } | ||
| 307 | |||
| 308 | /* | ||
| 309 | * Compute usable size that would result from allocating an object with the | ||
| 310 | * specified size and alignment. | ||
| 311 | */ | ||
| 312 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 313 | sz_sa2u(size_t size, size_t alignment) { | ||
| 314 | size_t usize; | ||
| 315 | |||
| 316 | assert(alignment != 0 && ((alignment - 1) & alignment) == 0); | ||
| 317 | |||
| 318 | /* Try for a small size class. */ | ||
| 319 | if (size <= SC_SMALL_MAXCLASS && alignment <= PAGE) { | ||
| 320 | /* | ||
| 321 | * Round size up to the nearest multiple of alignment. | ||
| 322 | * | ||
| 323 | * This done, we can take advantage of the fact that for each | ||
| 324 | * small size class, every object is aligned at the smallest | ||
| 325 | * power of two that is non-zero in the base two representation | ||
| 326 | * of the size. For example: | ||
| 327 | * | ||
| 328 | * Size | Base 2 | Minimum alignment | ||
| 329 | * -----+----------+------------------ | ||
| 330 | * 96 | 1100000 | 32 | ||
| 331 | * 144 | 10100000 | 32 | ||
| 332 | * 192 | 11000000 | 64 | ||
| 333 | */ | ||
| 334 | usize = sz_s2u(ALIGNMENT_CEILING(size, alignment)); | ||
| 335 | if (usize < SC_LARGE_MINCLASS) { | ||
| 336 | return usize; | ||
| 337 | } | ||
| 338 | } | ||
| 339 | |||
| 340 | /* Large size class. Beware of overflow. */ | ||
| 341 | |||
| 342 | if (unlikely(alignment > SC_LARGE_MAXCLASS)) { | ||
| 343 | return 0; | ||
| 344 | } | ||
| 345 | |||
| 346 | /* Make sure result is a large size class. */ | ||
| 347 | if (size <= SC_LARGE_MINCLASS) { | ||
| 348 | usize = SC_LARGE_MINCLASS; | ||
| 349 | } else { | ||
| 350 | usize = sz_s2u(size); | ||
| 351 | if (usize < size) { | ||
| 352 | /* size_t overflow. */ | ||
| 353 | return 0; | ||
| 354 | } | ||
| 355 | } | ||
| 356 | |||
| 357 | /* | ||
| 358 | * Calculate the multi-page mapping that large_palloc() would need in | ||
| 359 | * order to guarantee the alignment. | ||
| 360 | */ | ||
| 361 | if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) { | ||
| 362 | /* size_t overflow. */ | ||
| 363 | return 0; | ||
| 364 | } | ||
| 365 | return usize; | ||
| 366 | } | ||
| 367 | |||
| 368 | size_t sz_psz_quantize_floor(size_t size); | ||
| 369 | size_t sz_psz_quantize_ceil(size_t size); | ||
| 370 | |||
| 371 | #endif /* JEMALLOC_INTERNAL_SIZE_H */ | ||
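The sz_* helpers map a request size to its size class and back. A hedged worked example, assuming the common x86-64 layout (16-byte quantum, SC_LG_NGROUP == 2), where the size class group (64, 128] has a 16-byte delta:

    size_t usize = sz_s2u(100);          /* rounds 100 up to the 112-byte class */
    szind_t ind  = sz_size2index(100);   /* index of that same 112-byte bin     */
    assert(sz_index2size(ind) == usize); /* round-trips back to 112             */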
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tcache_externs.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tcache_externs.h deleted file mode 100644 index a2ab710..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tcache_externs.h +++ /dev/null | |||
| @@ -1,75 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H | ||
| 2 | #define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H | ||
| 3 | |||
| 4 | extern bool opt_tcache; | ||
| 5 | extern size_t opt_tcache_max; | ||
| 6 | extern ssize_t opt_lg_tcache_nslots_mul; | ||
| 7 | extern unsigned opt_tcache_nslots_small_min; | ||
| 8 | extern unsigned opt_tcache_nslots_small_max; | ||
| 9 | extern unsigned opt_tcache_nslots_large; | ||
| 10 | extern ssize_t opt_lg_tcache_shift; | ||
| 11 | extern size_t opt_tcache_gc_incr_bytes; | ||
| 12 | extern size_t opt_tcache_gc_delay_bytes; | ||
| 13 | extern unsigned opt_lg_tcache_flush_small_div; | ||
| 14 | extern unsigned opt_lg_tcache_flush_large_div; | ||
| 15 | |||
| 16 | /* | ||
| 17 | * Number of tcache bins. There are SC_NBINS small-object bins, plus 0 or more | ||
| 18 | * large-object bins. | ||
| 19 | */ | ||
| 20 | extern unsigned nhbins; | ||
| 21 | |||
| 22 | /* Maximum cached size class. */ | ||
| 23 | extern size_t tcache_maxclass; | ||
| 24 | |||
| 25 | extern cache_bin_info_t *tcache_bin_info; | ||
| 26 | |||
| 27 | /* | ||
| 28 | * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and | ||
| 29 | * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are | ||
| 30 | * completely disjoint from this data structure. tcaches starts off as a sparse | ||
| 31 | * array, so it has no physical memory footprint until individual pages are | ||
| 32 | * touched. This allows the entire array to be allocated the first time an | ||
| 33 | * explicit tcache is created without a disproportionate impact on memory usage. | ||
| 34 | */ | ||
| 35 | extern tcaches_t *tcaches; | ||
| 36 | |||
| 37 | size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); | ||
| 38 | void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, | ||
| 39 | cache_bin_t *tbin, szind_t binind, bool *tcache_success); | ||
| 40 | |||
| 41 | void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, | ||
| 42 | szind_t binind, unsigned rem); | ||
| 43 | void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, | ||
| 44 | szind_t binind, unsigned rem); | ||
| 45 | void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *bin, | ||
| 46 | szind_t binind, bool is_small); | ||
| 47 | void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow, | ||
| 48 | tcache_t *tcache, arena_t *arena); | ||
| 49 | tcache_t *tcache_create_explicit(tsd_t *tsd); | ||
| 50 | void tcache_cleanup(tsd_t *tsd); | ||
| 51 | void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); | ||
| 52 | bool tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind); | ||
| 53 | void tcaches_flush(tsd_t *tsd, unsigned ind); | ||
| 54 | void tcaches_destroy(tsd_t *tsd, unsigned ind); | ||
| 55 | bool tcache_boot(tsdn_t *tsdn, base_t *base); | ||
| 56 | void tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow, | ||
| 57 | tcache_t *tcache, arena_t *arena); | ||
| 58 | void tcache_prefork(tsdn_t *tsdn); | ||
| 59 | void tcache_postfork_parent(tsdn_t *tsdn); | ||
| 60 | void tcache_postfork_child(tsdn_t *tsdn); | ||
| 61 | void tcache_flush(tsd_t *tsd); | ||
| 62 | bool tsd_tcache_data_init(tsd_t *tsd); | ||
| 63 | bool tsd_tcache_enabled_data_init(tsd_t *tsd); | ||
| 64 | |||
| 65 | void tcache_assert_initialized(tcache_t *tcache); | ||
| 66 | |||
| 67 | /* Only accessed by thread event. */ | ||
| 68 | uint64_t tcache_gc_new_event_wait(tsd_t *tsd); | ||
| 69 | uint64_t tcache_gc_postponed_event_wait(tsd_t *tsd); | ||
| 70 | void tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed); | ||
| 71 | uint64_t tcache_gc_dalloc_new_event_wait(tsd_t *tsd); | ||
| 72 | uint64_t tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd); | ||
| 73 | void tcache_gc_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed); | ||
| 74 | |||
| 75 | #endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */ | ||
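The explicit tcaches declared above are reached from the public API through the tcache.{create,flush,destroy} mallctls and the MALLOCX_TCACHE() flag; the automatic per-thread caches never go through this path. A hedged sketch of that public-facing usage (je_-prefixed names may apply depending on the build configuration):

    #include <jemalloc/jemalloc.h>

    static void
    explicit_tcache_demo(void) {
        unsigned tc;
        size_t sz = sizeof(tc);
        /* Create an explicit tcache and obtain its id. */
        if (mallctl("tcache.create", &tc, &sz, NULL, 0) == 0) {
            void *p = mallocx(64, MALLOCX_TCACHE(tc));  /* allocate through it */
            dallocx(p, MALLOCX_TCACHE(tc));
            mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));
            mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
        }
    }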
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tcache_inlines.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tcache_inlines.h deleted file mode 100644 index 2634f14..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tcache_inlines.h +++ /dev/null | |||
| @@ -1,193 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H | ||
| 2 | #define JEMALLOC_INTERNAL_TCACHE_INLINES_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/bin.h" | ||
| 5 | #include "jemalloc/internal/jemalloc_internal_types.h" | ||
| 6 | #include "jemalloc/internal/san.h" | ||
| 7 | #include "jemalloc/internal/sc.h" | ||
| 8 | #include "jemalloc/internal/sz.h" | ||
| 9 | #include "jemalloc/internal/util.h" | ||
| 10 | |||
| 11 | static inline bool | ||
| 12 | tcache_enabled_get(tsd_t *tsd) { | ||
| 13 | return tsd_tcache_enabled_get(tsd); | ||
| 14 | } | ||
| 15 | |||
| 16 | static inline void | ||
| 17 | tcache_enabled_set(tsd_t *tsd, bool enabled) { | ||
| 18 | bool was_enabled = tsd_tcache_enabled_get(tsd); | ||
| 19 | |||
| 20 | if (!was_enabled && enabled) { | ||
| 21 | tsd_tcache_data_init(tsd); | ||
| 22 | } else if (was_enabled && !enabled) { | ||
| 23 | tcache_cleanup(tsd); | ||
| 24 | } | ||
| 25 | /* Commit the state last. Above calls check current state. */ | ||
| 26 | tsd_tcache_enabled_set(tsd, enabled); | ||
| 27 | tsd_slow_update(tsd); | ||
| 28 | } | ||
| 29 | |||
| 30 | JEMALLOC_ALWAYS_INLINE bool | ||
| 31 | tcache_small_bin_disabled(szind_t ind, cache_bin_t *bin) { | ||
| 32 | assert(ind < SC_NBINS); | ||
| 33 | bool ret = (cache_bin_info_ncached_max(&tcache_bin_info[ind]) == 0); | ||
| 34 | if (ret && bin != NULL) { | ||
| 35 | /* small size class but cache bin disabled. */ | ||
| 36 | assert(ind >= nhbins); | ||
| 37 | assert((uintptr_t)(*bin->stack_head) == | ||
| 38 | cache_bin_preceding_junk); | ||
| 39 | } | ||
| 40 | |||
| 41 | return ret; | ||
| 42 | } | ||
| 43 | |||
| 44 | JEMALLOC_ALWAYS_INLINE void * | ||
| 45 | tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, | ||
| 46 | size_t size, szind_t binind, bool zero, bool slow_path) { | ||
| 47 | void *ret; | ||
| 48 | bool tcache_success; | ||
| 49 | |||
| 50 | assert(binind < SC_NBINS); | ||
| 51 | cache_bin_t *bin = &tcache->bins[binind]; | ||
| 52 | ret = cache_bin_alloc(bin, &tcache_success); | ||
| 53 | assert(tcache_success == (ret != NULL)); | ||
| 54 | if (unlikely(!tcache_success)) { | ||
| 55 | bool tcache_hard_success; | ||
| 56 | arena = arena_choose(tsd, arena); | ||
| 57 | if (unlikely(arena == NULL)) { | ||
| 58 | return NULL; | ||
| 59 | } | ||
| 60 | if (unlikely(tcache_small_bin_disabled(binind, bin))) { | ||
| 61 | /* stats and zero are handled directly by the arena. */ | ||
| 62 | return arena_malloc_hard(tsd_tsdn(tsd), arena, size, | ||
| 63 | binind, zero); | ||
| 64 | } | ||
| 65 | tcache_bin_flush_stashed(tsd, tcache, bin, binind, | ||
| 66 | /* is_small */ true); | ||
| 67 | |||
| 68 | ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, | ||
| 69 | bin, binind, &tcache_hard_success); | ||
| 70 | if (tcache_hard_success == false) { | ||
| 71 | return NULL; | ||
| 72 | } | ||
| 73 | } | ||
| 74 | |||
| 75 | assert(ret); | ||
| 76 | if (unlikely(zero)) { | ||
| 77 | size_t usize = sz_index2size(binind); | ||
| 78 | assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); | ||
| 79 | memset(ret, 0, usize); | ||
| 80 | } | ||
| 81 | if (config_stats) { | ||
| 82 | bin->tstats.nrequests++; | ||
| 83 | } | ||
| 84 | return ret; | ||
| 85 | } | ||
| 86 | |||
| 87 | JEMALLOC_ALWAYS_INLINE void * | ||
| 88 | tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, | ||
| 89 | szind_t binind, bool zero, bool slow_path) { | ||
| 90 | void *ret; | ||
| 91 | bool tcache_success; | ||
| 92 | |||
| 93 | assert(binind >= SC_NBINS && binind < nhbins); | ||
| 94 | cache_bin_t *bin = &tcache->bins[binind]; | ||
| 95 | ret = cache_bin_alloc(bin, &tcache_success); | ||
| 96 | assert(tcache_success == (ret != NULL)); | ||
| 97 | if (unlikely(!tcache_success)) { | ||
| 98 | /* | ||
| 99 | * Only allocate one large object at a time, because it's quite | ||
| 100 | * expensive to create one and not use it. | ||
| 101 | */ | ||
| 102 | arena = arena_choose(tsd, arena); | ||
| 103 | if (unlikely(arena == NULL)) { | ||
| 104 | return NULL; | ||
| 105 | } | ||
| 106 | tcache_bin_flush_stashed(tsd, tcache, bin, binind, | ||
| 107 | /* is_small */ false); | ||
| 108 | |||
| 109 | ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero); | ||
| 110 | if (ret == NULL) { | ||
| 111 | return NULL; | ||
| 112 | } | ||
| 113 | } else { | ||
| 114 | if (unlikely(zero)) { | ||
| 115 | size_t usize = sz_index2size(binind); | ||
| 116 | assert(usize <= tcache_maxclass); | ||
| 117 | memset(ret, 0, usize); | ||
| 118 | } | ||
| 119 | |||
| 120 | if (config_stats) { | ||
| 121 | bin->tstats.nrequests++; | ||
| 122 | } | ||
| 123 | } | ||
| 124 | |||
| 125 | return ret; | ||
| 126 | } | ||
| 127 | |||
| 128 | JEMALLOC_ALWAYS_INLINE void | ||
| 129 | tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, | ||
| 130 | bool slow_path) { | ||
| 131 | assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS); | ||
| 132 | |||
| 133 | cache_bin_t *bin = &tcache->bins[binind]; | ||
| 134 | /* | ||
| 135 | * Not marking the branch unlikely because this is past free_fastpath() | ||
| 136 | * (which handles the most common cases), i.e. at this point we are | ||
| 137 | * mostly dealing with the uncommon cases. | ||
| 138 | */ | ||
| 139 | if (cache_bin_nonfast_aligned(ptr)) { | ||
| 140 | /* Junk unconditionally, even if bin is full. */ | ||
| 141 | san_junk_ptr(ptr, sz_index2size(binind)); | ||
| 142 | if (cache_bin_stash(bin, ptr)) { | ||
| 143 | return; | ||
| 144 | } | ||
| 145 | assert(cache_bin_full(bin)); | ||
| 146 | /* Bin full; fall through into the flush branch. */ | ||
| 147 | } | ||
| 148 | |||
| 149 | if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) { | ||
| 150 | if (unlikely(tcache_small_bin_disabled(binind, bin))) { | ||
| 151 | arena_dalloc_small(tsd_tsdn(tsd), ptr); | ||
| 152 | return; | ||
| 153 | } | ||
| 154 | cache_bin_sz_t max = cache_bin_info_ncached_max( | ||
| 155 | &tcache_bin_info[binind]); | ||
| 156 | unsigned remain = max >> opt_lg_tcache_flush_small_div; | ||
| 157 | tcache_bin_flush_small(tsd, tcache, bin, binind, remain); | ||
| 158 | bool ret = cache_bin_dalloc_easy(bin, ptr); | ||
| 159 | assert(ret); | ||
| 160 | } | ||
| 161 | } | ||
| 162 | |||
| 163 | JEMALLOC_ALWAYS_INLINE void | ||
| 164 | tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, | ||
| 165 | bool slow_path) { | ||
| 166 | |||
| 167 | assert(tcache_salloc(tsd_tsdn(tsd), ptr) | ||
| 168 | > SC_SMALL_MAXCLASS); | ||
| 169 | assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); | ||
| 170 | |||
| 171 | cache_bin_t *bin = &tcache->bins[binind]; | ||
| 172 | if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) { | ||
| 173 | unsigned remain = cache_bin_info_ncached_max( | ||
| 174 | &tcache_bin_info[binind]) >> opt_lg_tcache_flush_large_div; | ||
| 175 | tcache_bin_flush_large(tsd, tcache, bin, binind, remain); | ||
| 176 | bool ret = cache_bin_dalloc_easy(bin, ptr); | ||
| 177 | assert(ret); | ||
| 178 | } | ||
| 179 | } | ||
| 180 | |||
| 181 | JEMALLOC_ALWAYS_INLINE tcache_t * | ||
| 182 | tcaches_get(tsd_t *tsd, unsigned ind) { | ||
| 183 | tcaches_t *elm = &tcaches[ind]; | ||
| 184 | if (unlikely(elm->tcache == NULL)) { | ||
| 185 | malloc_printf("<jemalloc>: invalid tcache id (%u).\n", ind); | ||
| 186 | abort(); | ||
| 187 | } else if (unlikely(elm->tcache == TCACHES_ELM_NEED_REINIT)) { | ||
| 188 | elm->tcache = tcache_create_explicit(tsd); | ||
| 189 | } | ||
| 190 | return elm->tcache; | ||
| 191 | } | ||
| 192 | |||
| 193 | #endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */ | ||
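When a deallocation finds its cache bin full, the bin is not emptied; tcache_bin_flush_small()/large() above is asked to keep only ncached_max >> opt_lg_tcache_flush_*_div objects, making room for the freed pointer. A hedged arithmetic sketch, with an assumed capacity of 200 and an assumed divisor shift of 1:

    /* Inside tcache_dalloc_small(), once cache_bin_dalloc_easy() fails: */
    unsigned max    = 200;        /* assumed cache_bin_info_ncached_max(...) */
    unsigned remain = max >> 1;   /* assumed opt_lg_tcache_flush_small_div   */
    /* tcache_bin_flush_small(..., remain) keeps 100 cached objects and
     * returns the rest to the arena; the freed pointer then fits again. */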
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tcache_structs.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tcache_structs.h deleted file mode 100644 index 176d73d..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tcache_structs.h +++ /dev/null | |||
| @@ -1,68 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H | ||
| 2 | #define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/cache_bin.h" | ||
| 5 | #include "jemalloc/internal/ql.h" | ||
| 6 | #include "jemalloc/internal/sc.h" | ||
| 7 | #include "jemalloc/internal/ticker.h" | ||
| 8 | #include "jemalloc/internal/tsd_types.h" | ||
| 9 | |||
| 10 | /* | ||
| 11 | * The tcache state is split into the slow and hot path data. Each has a | ||
| 12 | * pointer to the other, and the data always comes in pairs. The layout of each | ||
| 13 | * of them varies in practice; tcache_slow lives in the TSD for the automatic | ||
| 14 | * tcache, and as part of a dynamic allocation for manual allocations. Keeping | ||
| 15 | * a pointer to tcache_slow lets us treat these cases uniformly, rather than | ||
| 16 | * splitting up the tcache [de]allocation code into those paths called with the | ||
| 17 | * TSD tcache and those called with a manual tcache. | ||
| 18 | */ | ||
| 19 | |||
| 20 | struct tcache_slow_s { | ||
| 21 | /* Lets us track all the tcaches in an arena. */ | ||
| 22 | ql_elm(tcache_slow_t) link; | ||
| 23 | |||
| 24 | /* | ||
| 25 | * The descriptor lets the arena find our cache bins without seeing the | ||
| 26 | * tcache definition. This enables arenas to aggregate stats across | ||
| 27 | * tcaches without having a tcache dependency. | ||
| 28 | */ | ||
| 29 | cache_bin_array_descriptor_t cache_bin_array_descriptor; | ||
| 30 | |||
| 31 | /* The arena this tcache is associated with. */ | ||
| 32 | arena_t *arena; | ||
| 33 | /* Next bin to GC. */ | ||
| 34 | szind_t next_gc_bin; | ||
| 35 | /* For small bins, fill (ncached_max >> lg_fill_div). */ | ||
| 36 | uint8_t lg_fill_div[SC_NBINS]; | ||
| 37 | /* For small bins, whether the bin has been refilled since the last GC. */ | ||
| 38 | bool bin_refilled[SC_NBINS]; | ||
| 39 | /* | ||
| 40 | * For small bins, the number of items we can pretend to flush before | ||
| 41 | * actually flushing. | ||
| 42 | */ | ||
| 43 | uint8_t bin_flush_delay_items[SC_NBINS]; | ||
| 44 | /* | ||
| 45 | * The start of the allocation containing the dynamic allocation for | ||
| 46 | * either the cache bins alone, or the cache bin memory as well as this | ||
| 47 | * tcache_slow_t and its associated tcache_t. | ||
| 48 | */ | ||
| 49 | void *dyn_alloc; | ||
| 50 | |||
| 51 | /* The associated bins. */ | ||
| 52 | tcache_t *tcache; | ||
| 53 | }; | ||
| 54 | |||
| 55 | struct tcache_s { | ||
| 56 | tcache_slow_t *tcache_slow; | ||
| 57 | cache_bin_t bins[TCACHE_NBINS_MAX]; | ||
| 58 | }; | ||
| 59 | |||
| 60 | /* Linkage for list of available (previously used) explicit tcache IDs. */ | ||
| 61 | struct tcaches_s { | ||
| 62 | union { | ||
| 63 | tcache_t *tcache; | ||
| 64 | tcaches_t *next; | ||
| 65 | }; | ||
| 66 | }; | ||
| 67 | |||
| 68 | #endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tcache_types.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tcache_types.h deleted file mode 100644 index 583677e..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tcache_types.h +++ /dev/null | |||
| @@ -1,35 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H | ||
| 2 | #define JEMALLOC_INTERNAL_TCACHE_TYPES_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/sc.h" | ||
| 5 | |||
| 6 | typedef struct tcache_slow_s tcache_slow_t; | ||
| 7 | typedef struct tcache_s tcache_t; | ||
| 8 | typedef struct tcaches_s tcaches_t; | ||
| 9 | |||
| 10 | /* | ||
| 11 | * tcache pointers close to NULL are used to encode state information that is | ||
| 12 | * used for two purposes: preventing thread caching on a per thread basis and | ||
| 13 | * cleaning up during thread shutdown. | ||
| 14 | */ | ||
| 15 | #define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) | ||
| 16 | #define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) | ||
| 17 | #define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) | ||
| 18 | #define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY | ||
| 19 | |||
| 20 | /* Used in TSD static initializer only. Real init in tsd_tcache_data_init(). */ | ||
| 21 | #define TCACHE_ZERO_INITIALIZER {0} | ||
| 22 | #define TCACHE_SLOW_ZERO_INITIALIZER {0} | ||
| 23 | |||
| 24 | /* Used in TSD static initializer only. Will be initialized to opt_tcache. */ | ||
| 25 | #define TCACHE_ENABLED_ZERO_INITIALIZER false | ||
| 26 | |||
| 27 | /* Used for explicit tcache only. Means flushed but not destroyed. */ | ||
| 28 | #define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1) | ||
| 29 | |||
| 30 | #define TCACHE_LG_MAXCLASS_LIMIT 23 /* tcache_maxclass = 8M */ | ||
| 31 | #define TCACHE_MAXCLASS_LIMIT ((size_t)1 << TCACHE_LG_MAXCLASS_LIMIT) | ||
| 32 | #define TCACHE_NBINS_MAX (SC_NBINS + SC_NGROUP * \ | ||
| 33 | (TCACHE_LG_MAXCLASS_LIMIT - SC_LG_LARGE_MINCLASS) + 1) | ||
| 34 | |||
| 35 | #endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */ | ||
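TCACHE_NBINS_MAX above bounds the bins[] array in tcache_s. A hedged evaluation under a typical 4 KiB-page x86-64 configuration (SC_NBINS == 36, SC_NGROUP == 4, SC_LG_LARGE_MINCLASS == 14 are assumptions about that configuration, not guarantees of this header):

    /* TCACHE_NBINS_MAX
     *   = SC_NBINS + SC_NGROUP * (TCACHE_LG_MAXCLASS_LIMIT - SC_LG_LARGE_MINCLASS) + 1
     *   = 36 + 4 * (23 - 14) + 1
     *   = 73 cache bins at most (tcache_maxclass capped at 8 MiB). */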
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/test_hooks.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/test_hooks.h deleted file mode 100644 index 3d530b5..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/test_hooks.h +++ /dev/null | |||
| @@ -1,24 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_TEST_HOOKS_H | ||
| 2 | #define JEMALLOC_INTERNAL_TEST_HOOKS_H | ||
| 3 | |||
| 4 | extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)(); | ||
| 5 | extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)(); | ||
| 6 | |||
| 7 | #if defined(JEMALLOC_JET) || defined(JEMALLOC_UNIT_TEST) | ||
| 8 | # define JEMALLOC_TEST_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn) | ||
| 9 | |||
| 10 | # define open JEMALLOC_TEST_HOOK(open, test_hooks_libc_hook) | ||
| 11 | # define read JEMALLOC_TEST_HOOK(read, test_hooks_libc_hook) | ||
| 12 | # define write JEMALLOC_TEST_HOOK(write, test_hooks_libc_hook) | ||
| 13 | # define readlink JEMALLOC_TEST_HOOK(readlink, test_hooks_libc_hook) | ||
| 14 | # define close JEMALLOC_TEST_HOOK(close, test_hooks_libc_hook) | ||
| 15 | # define creat JEMALLOC_TEST_HOOK(creat, test_hooks_libc_hook) | ||
| 16 | # define secure_getenv JEMALLOC_TEST_HOOK(secure_getenv, test_hooks_libc_hook) | ||
| 17 | /* Note that this is undef'd and re-define'd in src/prof.c. */ | ||
| 18 | # define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) | ||
| 19 | #else | ||
| 20 | # define JEMALLOC_TEST_HOOK(fn, hook) fn | ||
| 21 | #endif | ||
| 22 | |||
| 23 | |||
| 24 | #endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */ | ||
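JEMALLOC_TEST_HOOK relies on the comma operator: the wrapped expression first invokes the hook (if one is installed) and then evaluates to the real function, so the call proceeds unchanged. A sketch of how a call site expands when JEMALLOC_JET or JEMALLOC_UNIT_TEST is defined (the file name argument is illustrative):

    /* As written in the source: */
    int fd = open("/proc/self/maps", O_RDONLY);

    /* After preprocessing, conceptually: */
    int fd = ((void)(test_hooks_libc_hook != NULL &&
        (test_hooks_libc_hook(), 0)), open)("/proc/self/maps", O_RDONLY);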
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/thread_event.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/thread_event.h deleted file mode 100644 index 2f4e1b3..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/thread_event.h +++ /dev/null | |||
| @@ -1,301 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_THREAD_EVENT_H | ||
| 2 | #define JEMALLOC_INTERNAL_THREAD_EVENT_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/tsd.h" | ||
| 5 | |||
| 6 | /* "te" is short for "thread_event" */ | ||
| 7 | |||
| 8 | /* | ||
| 9 | * TE_MIN_START_WAIT should not exceed the minimal allocation usize. | ||
| 10 | */ | ||
| 11 | #define TE_MIN_START_WAIT ((uint64_t)1U) | ||
| 12 | #define TE_MAX_START_WAIT UINT64_MAX | ||
| 13 | |||
| 14 | /* | ||
| 15 | * Maximum threshold on thread_(de)allocated_next_event_fast, so that there is | ||
| 16 | * no need to check for overflow in the malloc fast path. (The allocation size | ||
| 17 | * in the malloc fast path never exceeds SC_LOOKUP_MAXCLASS.) | ||
| 18 | */ | ||
| 19 | #define TE_NEXT_EVENT_FAST_MAX (UINT64_MAX - SC_LOOKUP_MAXCLASS + 1U) | ||
| 20 | |||
| 21 | /* | ||
| 22 | * The max interval helps make sure that malloc stays on the fast path in the | ||
| 23 | * common case, i.e. thread_allocated < thread_allocated_next_event_fast. When | ||
| 24 | * thread_allocated is within an event's distance to TE_NEXT_EVENT_FAST_MAX | ||
| 25 | * above, thread_allocated_next_event_fast is wrapped around and we fall back to | ||
| 26 | * the medium-fast path. The max interval makes sure that we're not staying on | ||
| 27 | * the fallback case for too long, even if there's no active event or if all | ||
| 28 | * active events have long wait times. | ||
| 29 | */ | ||
| 30 | #define TE_MAX_INTERVAL ((uint64_t)(4U << 20)) | ||
| 31 | |||
| 32 | /* | ||
| 33 | * Invalid elapsed time, for situations where elapsed time is not needed. See | ||
| 34 | * comments in thread_event.c for more info. | ||
| 35 | */ | ||
| 36 | #define TE_INVALID_ELAPSED UINT64_MAX | ||
| 37 | |||
| 38 | typedef struct te_ctx_s { | ||
| 39 | bool is_alloc; | ||
| 40 | uint64_t *current; | ||
| 41 | uint64_t *last_event; | ||
| 42 | uint64_t *next_event; | ||
| 43 | uint64_t *next_event_fast; | ||
| 44 | } te_ctx_t; | ||
| 45 | |||
| 46 | void te_assert_invariants_debug(tsd_t *tsd); | ||
| 47 | void te_event_trigger(tsd_t *tsd, te_ctx_t *ctx); | ||
| 48 | void te_recompute_fast_threshold(tsd_t *tsd); | ||
| 49 | void tsd_te_init(tsd_t *tsd); | ||
| 50 | |||
| 51 | /* | ||
| 52 | * List of all events, in the following format: | ||
| 53 | * E(event, (condition), is_alloc_event) | ||
| 54 | */ | ||
| 55 | #define ITERATE_OVER_ALL_EVENTS \ | ||
| 56 | E(tcache_gc, (opt_tcache_gc_incr_bytes > 0), true) \ | ||
| 57 | E(prof_sample, (config_prof && opt_prof), true) \ | ||
| 58 | E(stats_interval, (opt_stats_interval >= 0), true) \ | ||
| 59 | E(tcache_gc_dalloc, (opt_tcache_gc_incr_bytes > 0), false) \ | ||
| 60 | E(peak_alloc, config_stats, true) \ | ||
| 61 | E(peak_dalloc, config_stats, false) | ||
| 62 | |||
| 63 | #define E(event, condition_unused, is_alloc_event_unused) \ | ||
| 64 | C(event##_event_wait) | ||
| 65 | |||
| 66 | /* List of all thread event counters. */ | ||
| 67 | #define ITERATE_OVER_ALL_COUNTERS \ | ||
| 68 | C(thread_allocated) \ | ||
| 69 | C(thread_allocated_last_event) \ | ||
| 70 | ITERATE_OVER_ALL_EVENTS \ | ||
| 71 | C(prof_sample_last_event) \ | ||
| 72 | C(stats_interval_last_event) | ||
| 73 | |||
| 74 | /* Getters directly wrap TSD getters. */ | ||
| 75 | #define C(counter) \ | ||
| 76 | JEMALLOC_ALWAYS_INLINE uint64_t \ | ||
| 77 | counter##_get(tsd_t *tsd) { \ | ||
| 78 | return tsd_##counter##_get(tsd); \ | ||
| 79 | } | ||
| 80 | |||
| 81 | ITERATE_OVER_ALL_COUNTERS | ||
| 82 | #undef C | ||
| 83 | |||
| 84 | /* | ||
| 85 | * Setters call the TSD pointer getters rather than the TSD setters, so that | ||
| 86 | * the counters can be modified even when TSD state is reincarnated or | ||
| 87 | * minimal_initialized: if an event is triggered in such cases, we will | ||
| 88 | * temporarily delay the event and let it be immediately triggered at the next | ||
| 89 | * allocation call. | ||
| 90 | */ | ||
| 91 | #define C(counter) \ | ||
| 92 | JEMALLOC_ALWAYS_INLINE void \ | ||
| 93 | counter##_set(tsd_t *tsd, uint64_t v) { \ | ||
| 94 | *tsd_##counter##p_get(tsd) = v; \ | ||
| 95 | } | ||
| 96 | |||
| 97 | ITERATE_OVER_ALL_COUNTERS | ||
| 98 | #undef C | ||
| 99 | |||
| 100 | /* | ||
| 101 | * For generating _event_wait getter / setter functions for each individual | ||
| 102 | * event. | ||
| 103 | */ | ||
| 104 | #undef E | ||
| 105 | |||
| 106 | /* | ||
| 107 | * The malloc and free fastpath getters -- use the unsafe getters since tsd may | ||
| 108 | * be non-nominal, in which case the fast_threshold will be set to 0. This | ||
| 109 | * allows checking for events and tsd non-nominal in a single branch. | ||
| 110 | * | ||
| 111 | * Note that these can only be used on the fastpath. | ||
| 112 | */ | ||
| 113 | JEMALLOC_ALWAYS_INLINE void | ||
| 114 | te_malloc_fastpath_ctx(tsd_t *tsd, uint64_t *allocated, uint64_t *threshold) { | ||
| 115 | *allocated = *tsd_thread_allocatedp_get_unsafe(tsd); | ||
| 116 | *threshold = *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd); | ||
| 117 | assert(*threshold <= TE_NEXT_EVENT_FAST_MAX); | ||
| 118 | } | ||
| 119 | |||
| 120 | JEMALLOC_ALWAYS_INLINE void | ||
| 121 | te_free_fastpath_ctx(tsd_t *tsd, uint64_t *deallocated, uint64_t *threshold) { | ||
| 122 | /* Unsafe getters since this may happen before tsd_init. */ | ||
| 123 | *deallocated = *tsd_thread_deallocatedp_get_unsafe(tsd); | ||
| 124 | *threshold = *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd); | ||
| 125 | assert(*threshold <= TE_NEXT_EVENT_FAST_MAX); | ||
| 126 | } | ||
| 127 | |||
| 128 | JEMALLOC_ALWAYS_INLINE bool | ||
| 129 | te_ctx_is_alloc(te_ctx_t *ctx) { | ||
| 130 | return ctx->is_alloc; | ||
| 131 | } | ||
| 132 | |||
| 133 | JEMALLOC_ALWAYS_INLINE uint64_t | ||
| 134 | te_ctx_current_bytes_get(te_ctx_t *ctx) { | ||
| 135 | return *ctx->current; | ||
| 136 | } | ||
| 137 | |||
| 138 | JEMALLOC_ALWAYS_INLINE void | ||
| 139 | te_ctx_current_bytes_set(te_ctx_t *ctx, uint64_t v) { | ||
| 140 | *ctx->current = v; | ||
| 141 | } | ||
| 142 | |||
| 143 | JEMALLOC_ALWAYS_INLINE uint64_t | ||
| 144 | te_ctx_last_event_get(te_ctx_t *ctx) { | ||
| 145 | return *ctx->last_event; | ||
| 146 | } | ||
| 147 | |||
| 148 | JEMALLOC_ALWAYS_INLINE void | ||
| 149 | te_ctx_last_event_set(te_ctx_t *ctx, uint64_t v) { | ||
| 150 | *ctx->last_event = v; | ||
| 151 | } | ||
| 152 | |||
| 153 | /* Below 3 for next_event_fast. */ | ||
| 154 | JEMALLOC_ALWAYS_INLINE uint64_t | ||
| 155 | te_ctx_next_event_fast_get(te_ctx_t *ctx) { | ||
| 156 | uint64_t v = *ctx->next_event_fast; | ||
| 157 | assert(v <= TE_NEXT_EVENT_FAST_MAX); | ||
| 158 | return v; | ||
| 159 | } | ||
| 160 | |||
| 161 | JEMALLOC_ALWAYS_INLINE void | ||
| 162 | te_ctx_next_event_fast_set(te_ctx_t *ctx, uint64_t v) { | ||
| 163 | assert(v <= TE_NEXT_EVENT_FAST_MAX); | ||
| 164 | *ctx->next_event_fast = v; | ||
| 165 | } | ||
| 166 | |||
| 167 | JEMALLOC_ALWAYS_INLINE void | ||
| 168 | te_next_event_fast_set_non_nominal(tsd_t *tsd) { | ||
| 169 | /* | ||
| 170 | * Set the fast thresholds to zero when tsd is non-nominal. Use the | ||
| 171 | * unsafe getter as this may get called during tsd init and clean up. | ||
| 172 | */ | ||
| 173 | *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd) = 0; | ||
| 174 | *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) = 0; | ||
| 175 | } | ||
| 176 | |||
| 177 | /* For next_event. Setter also updates the fast threshold. */ | ||
| 178 | JEMALLOC_ALWAYS_INLINE uint64_t | ||
| 179 | te_ctx_next_event_get(te_ctx_t *ctx) { | ||
| 180 | return *ctx->next_event; | ||
| 181 | } | ||
| 182 | |||
| 183 | JEMALLOC_ALWAYS_INLINE void | ||
| 184 | te_ctx_next_event_set(tsd_t *tsd, te_ctx_t *ctx, uint64_t v) { | ||
| 185 | *ctx->next_event = v; | ||
| 186 | te_recompute_fast_threshold(tsd); | ||
| 187 | } | ||
| 188 | |||
| 189 | /* | ||
| 190 | * The function checks in debug mode whether the thread event counters are in | ||
| 191 | * a consistent state, which forms the invariants before and after each round | ||
| 192 | * of thread event handling that we can rely on and need to promise. | ||
| 193 | * The invariants are only temporarily violated in the middle of | ||
| 194 | * te_event_advance() if an event is triggered (the te_event_trigger() call at | ||
| 195 | * the end will restore the invariants). | ||
| 196 | */ | ||
| 197 | JEMALLOC_ALWAYS_INLINE void | ||
| 198 | te_assert_invariants(tsd_t *tsd) { | ||
| 199 | if (config_debug) { | ||
| 200 | te_assert_invariants_debug(tsd); | ||
| 201 | } | ||
| 202 | } | ||
| 203 | |||
| 204 | JEMALLOC_ALWAYS_INLINE void | ||
| 205 | te_ctx_get(tsd_t *tsd, te_ctx_t *ctx, bool is_alloc) { | ||
| 206 | ctx->is_alloc = is_alloc; | ||
| 207 | if (is_alloc) { | ||
| 208 | ctx->current = tsd_thread_allocatedp_get(tsd); | ||
| 209 | ctx->last_event = tsd_thread_allocated_last_eventp_get(tsd); | ||
| 210 | ctx->next_event = tsd_thread_allocated_next_eventp_get(tsd); | ||
| 211 | ctx->next_event_fast = | ||
| 212 | tsd_thread_allocated_next_event_fastp_get(tsd); | ||
| 213 | } else { | ||
| 214 | ctx->current = tsd_thread_deallocatedp_get(tsd); | ||
| 215 | ctx->last_event = tsd_thread_deallocated_last_eventp_get(tsd); | ||
| 216 | ctx->next_event = tsd_thread_deallocated_next_eventp_get(tsd); | ||
| 217 | ctx->next_event_fast = | ||
| 218 | tsd_thread_deallocated_next_event_fastp_get(tsd); | ||
| 219 | } | ||
| 220 | } | ||
| 221 | |||
| 222 | /* | ||
| 223 | * The lookahead functionality lets an event look ahead, i.e. determine, | ||
| 224 | * without touching the event counters, whether the event would be | ||
| 225 | * triggered. The event counters are not advanced until the end of the | ||
| 226 | * allocation / deallocation calls, so the lookahead can be useful if some | ||
| 227 | * preparation work for some event must be done early in the allocation / | ||
| 228 | * deallocation calls. | ||
| 229 | * | ||
| 230 | * Currently only the profiling sampling event needs the lookahead | ||
| 231 | * functionality, so we don't yet define general purpose lookahead functions. | ||
| 232 | * | ||
| 233 | * "Surplus" refers to the number of bytes beyond what is needed to | ||
| 234 | * trigger an event, which can be a useful quantity to have in | ||
| 235 | * general when lookahead is being called. | ||
| 236 | */ | ||
| 237 | |||
| 238 | JEMALLOC_ALWAYS_INLINE bool | ||
| 239 | te_prof_sample_event_lookahead_surplus(tsd_t *tsd, size_t usize, | ||
| 240 | size_t *surplus) { | ||
| 241 | if (surplus != NULL) { | ||
| 242 | /* | ||
| 243 | * This is a dead store: the surplus will be overwritten before | ||
| 244 | * any read. The initialization suppresses compiler warnings. | ||
| 245 | * Meanwhile, using SIZE_MAX to initialize is good for | ||
| 246 | * debugging purpose, because a valid surplus value is strictly | ||
| 247 | * less than usize, which is at most SIZE_MAX. | ||
| 248 | */ | ||
| 249 | *surplus = SIZE_MAX; | ||
| 250 | } | ||
| 251 | if (unlikely(!tsd_nominal(tsd) || tsd_reentrancy_level_get(tsd) > 0)) { | ||
| 252 | return false; | ||
| 253 | } | ||
| 254 | /* The subtraction is intentionally susceptible to underflow. */ | ||
| 255 | uint64_t accumbytes = tsd_thread_allocated_get(tsd) + usize - | ||
| 256 | tsd_thread_allocated_last_event_get(tsd); | ||
| 257 | uint64_t sample_wait = tsd_prof_sample_event_wait_get(tsd); | ||
| 258 | if (accumbytes < sample_wait) { | ||
| 259 | return false; | ||
| 260 | } | ||
| 261 | assert(accumbytes - sample_wait < (uint64_t)usize); | ||
| 262 | if (surplus != NULL) { | ||
| 263 | *surplus = (size_t)(accumbytes - sample_wait); | ||
| 264 | } | ||
| 265 | return true; | ||
| 266 | } | ||
| 267 | |||
| 268 | JEMALLOC_ALWAYS_INLINE bool | ||
| 269 | te_prof_sample_event_lookahead(tsd_t *tsd, size_t usize) { | ||
| 270 | return te_prof_sample_event_lookahead_surplus(tsd, usize, NULL); | ||
| 271 | } | ||
| 272 | |||
| 273 | JEMALLOC_ALWAYS_INLINE void | ||
| 274 | te_event_advance(tsd_t *tsd, size_t usize, bool is_alloc) { | ||
| 275 | te_assert_invariants(tsd); | ||
| 276 | |||
| 277 | te_ctx_t ctx; | ||
| 278 | te_ctx_get(tsd, &ctx, is_alloc); | ||
| 279 | |||
| 280 | uint64_t bytes_before = te_ctx_current_bytes_get(&ctx); | ||
| 281 | te_ctx_current_bytes_set(&ctx, bytes_before + usize); | ||
| 282 | |||
| 283 | /* The subtraction is intentionally susceptible to underflow. */ | ||
| 284 | if (likely(usize < te_ctx_next_event_get(&ctx) - bytes_before)) { | ||
| 285 | te_assert_invariants(tsd); | ||
| 286 | } else { | ||
| 287 | te_event_trigger(tsd, &ctx); | ||
| 288 | } | ||
| 289 | } | ||
| 290 | |||
| 291 | JEMALLOC_ALWAYS_INLINE void | ||
| 292 | thread_dalloc_event(tsd_t *tsd, size_t usize) { | ||
| 293 | te_event_advance(tsd, usize, false); | ||
| 294 | } | ||
| 295 | |||
| 296 | JEMALLOC_ALWAYS_INLINE void | ||
| 297 | thread_alloc_event(tsd_t *tsd, size_t usize) { | ||
| 298 | te_event_advance(tsd, usize, true); | ||
| 299 | } | ||
| 300 | |||
| 301 | #endif /* JEMALLOC_INTERNAL_THREAD_EVENT_H */ | ||
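te_event_advance() above adds the usize of the current operation to the per-thread byte counter and fires an event once the counter reaches next_event; the comparison is written so that the unsigned subtraction may wrap without harm. A small hedged numeric trace of that check:

    /* Assumed state: 9000 bytes counted so far, next event due at 10000. */
    uint64_t bytes_before = 9000, next_event = 10000;
    size_t usize = 1500;                      /* bytes in this allocation */
    if (usize < next_event - bytes_before) {  /* 1500 < 1000 -> false     */
        /* fast path: no event */
    } else {
        /* te_event_trigger(): 10500 crossed the 10000-byte threshold */
    }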
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ticker.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ticker.h deleted file mode 100644 index 6b51dde..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/ticker.h +++ /dev/null | |||
| @@ -1,175 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_TICKER_H | ||
| 2 | #define JEMALLOC_INTERNAL_TICKER_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/prng.h" | ||
| 5 | #include "jemalloc/internal/util.h" | ||
| 6 | |||
| 7 | /** | ||
| 8 | * A ticker makes it easy to count down events until some limit. You | ||
| 9 | * ticker_init the ticker to trigger every nticks events. You then notify it | ||
| 10 | * that an event has occurred with calls to ticker_tick (or that nticks events | ||
| 11 | * have occurred with a call to ticker_ticks), which will return true (and reset | ||
| 12 | * the counter) if the countdown hits zero. | ||
| 13 | */ | ||
| 14 | typedef struct ticker_s ticker_t; | ||
| 15 | struct ticker_s { | ||
| 16 | int32_t tick; | ||
| 17 | int32_t nticks; | ||
| 18 | }; | ||
| 19 | |||
| 20 | static inline void | ||
| 21 | ticker_init(ticker_t *ticker, int32_t nticks) { | ||
| 22 | ticker->tick = nticks; | ||
| 23 | ticker->nticks = nticks; | ||
| 24 | } | ||
| 25 | |||
| 26 | static inline void | ||
| 27 | ticker_copy(ticker_t *ticker, const ticker_t *other) { | ||
| 28 | *ticker = *other; | ||
| 29 | } | ||
| 30 | |||
| 31 | static inline int32_t | ||
| 32 | ticker_read(const ticker_t *ticker) { | ||
| 33 | return ticker->tick; | ||
| 34 | } | ||
| 35 | |||
| 36 | /* | ||
| 37 | * Not intended to be a public API. Unfortunately, on x86, neither gcc nor | ||
| 38 | * clang seems smart enough to turn | ||
| 39 | * ticker->tick -= nticks; | ||
| 40 | * if (unlikely(ticker->tick < 0)) { | ||
| 41 | * fixup ticker | ||
| 42 | * return true; | ||
| 43 | * } | ||
| 44 | * return false; | ||
| 45 | * into | ||
| 46 | * subq %nticks_reg, (%ticker_reg) | ||
| 47 | * js fixup ticker | ||
| 48 | * | ||
| 49 | * unless we force "fixup ticker" out of line. In that case, gcc gets it right, | ||
| 50 | * but clang now does worse than before. So, on x86 with gcc, we force it out | ||
| 51 | * of line, but otherwise let the inlining occur. Ordinarily this wouldn't be | ||
| 52 | * worth the hassle, but this is on the fast path of both malloc and free (via | ||
| 53 | * tcache_event). | ||
| 54 | */ | ||
| 55 | #if defined(__GNUC__) && !defined(__clang__) \ | ||
| 56 | && (defined(__x86_64__) || defined(__i386__)) | ||
| 57 | JEMALLOC_NOINLINE | ||
| 58 | #endif | ||
| 59 | static bool | ||
| 60 | ticker_fixup(ticker_t *ticker) { | ||
| 61 | ticker->tick = ticker->nticks; | ||
| 62 | return true; | ||
| 63 | } | ||
| 64 | |||
| 65 | static inline bool | ||
| 66 | ticker_ticks(ticker_t *ticker, int32_t nticks) { | ||
| 67 | ticker->tick -= nticks; | ||
| 68 | if (unlikely(ticker->tick < 0)) { | ||
| 69 | return ticker_fixup(ticker); | ||
| 70 | } | ||
| 71 | return false; | ||
| 72 | } | ||
| 73 | |||
| 74 | static inline bool | ||
| 75 | ticker_tick(ticker_t *ticker) { | ||
| 76 | return ticker_ticks(ticker, 1); | ||
| 77 | } | ||
| 78 | |||
| 79 | /* | ||
| 80 | * Try to tick. If ticker would fire, return true, but rely on | ||
| 81 | * slowpath to reset ticker. | ||
| 82 | */ | ||
| 83 | static inline bool | ||
| 84 | ticker_trytick(ticker_t *ticker) { | ||
| 85 | --ticker->tick; | ||
| 86 | if (unlikely(ticker->tick < 0)) { | ||
| 87 | return true; | ||
| 88 | } | ||
| 89 | return false; | ||
| 90 | } | ||
| 91 | |||
| 92 | /* | ||
| 93 | * The ticker_geom_t is much like the ticker_t, except that instead of ticker | ||
| 94 | * having a constant countdown, it has an approximate one; each tick has | ||
| 95 | * approximately a 1/nticks chance of triggering the count. | ||
| 96 | * | ||
| 97 | * The motivation is in triggering arena decay. With a naive strategy, each | ||
| 98 | * thread would maintain a ticker per arena, and check if decay is necessary | ||
| 99 | * each time that the arena's ticker fires. This has two costs: | ||
| 100 | * - Since under reasonable assumptions both threads and arenas can scale | ||
| 101 | * linearly with the number of CPUs, maintaining per-arena data in each thread | ||
| 102 | * scales quadratically with the number of CPUs. | ||
| 103 | * - These tickers are often a cache miss down tcache flush pathways. | ||
| 104 | * | ||
| 105 | * By giving each tick a 1/nticks chance of firing, we still maintain the same | ||
| 106 | * average number of ticks-until-firing per arena, with only a single ticker's | ||
| 107 | * worth of metadata. | ||
| 108 | */ | ||
| 109 | |||
| 110 | /* See ticker.c for an explanation of these constants. */ | ||
| 111 | #define TICKER_GEOM_NBITS 6 | ||
| 112 | #define TICKER_GEOM_MUL 61 | ||
| 113 | extern const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS]; | ||
| 114 | |||
| 115 | /* Not actually any different from ticker_t; just for type safety. */ | ||
| 116 | typedef struct ticker_geom_s ticker_geom_t; | ||
| 117 | struct ticker_geom_s { | ||
| 118 | int32_t tick; | ||
| 119 | int32_t nticks; | ||
| 120 | }; | ||
| 121 | |||
| 122 | /* | ||
| 123 | * Just pick the average delay for the first counter. We're more concerned with | ||
| 124 | * the behavior over long periods of time rather than the exact timing of the | ||
| 125 | * initial ticks. | ||
| 126 | */ | ||
| 127 | #define TICKER_GEOM_INIT(nticks) {nticks, nticks} | ||
| 128 | |||
| 129 | static inline void | ||
| 130 | ticker_geom_init(ticker_geom_t *ticker, int32_t nticks) { | ||
| 131 | /* | ||
| 132 | * Make sure there's no overflow possible. This shouldn't really be a | ||
| 133 | * problem for reasonable nticks choices, which are all static and | ||
| 134 | * relatively small. | ||
| 135 | */ | ||
| 136 | assert((uint64_t)nticks * (uint64_t)255 / (uint64_t)TICKER_GEOM_MUL | ||
| 137 | <= (uint64_t)INT32_MAX); | ||
| 138 | ticker->tick = nticks; | ||
| 139 | ticker->nticks = nticks; | ||
| 140 | } | ||
| 141 | |||
| 142 | static inline int32_t | ||
| 143 | ticker_geom_read(const ticker_geom_t *ticker) { | ||
| 144 | return ticker->tick; | ||
| 145 | } | ||
| 146 | |||
| 147 | /* Same deal as above. */ | ||
| 148 | #if defined(__GNUC__) && !defined(__clang__) \ | ||
| 149 | && (defined(__x86_64__) || defined(__i386__)) | ||
| 150 | JEMALLOC_NOINLINE | ||
| 151 | #endif | ||
| 152 | static bool | ||
| 153 | ticker_geom_fixup(ticker_geom_t *ticker, uint64_t *prng_state) { | ||
| 154 | uint64_t idx = prng_lg_range_u64(prng_state, TICKER_GEOM_NBITS); | ||
| 155 | ticker->tick = (uint32_t)( | ||
| 156 | (uint64_t)ticker->nticks * (uint64_t)ticker_geom_table[idx] | ||
| 157 | / (uint64_t)TICKER_GEOM_MUL); | ||
| 158 | return true; | ||
| 159 | } | ||
| 160 | |||
| 161 | static inline bool | ||
| 162 | ticker_geom_ticks(ticker_geom_t *ticker, uint64_t *prng_state, int32_t nticks) { | ||
| 163 | ticker->tick -= nticks; | ||
| 164 | if (unlikely(ticker->tick < 0)) { | ||
| 165 | return ticker_geom_fixup(ticker, prng_state); | ||
| 166 | } | ||
| 167 | return false; | ||
| 168 | } | ||
| 169 | |||
| 170 | static inline bool | ||
| 171 | ticker_geom_tick(ticker_geom_t *ticker, uint64_t *prng_state) { | ||
| 172 | return ticker_geom_ticks(ticker, prng_state, 1); | ||
| 173 | } | ||
| 174 | |||
| 175 | #endif /* JEMALLOC_INTERNAL_TICKER_H */ | ||
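A short usage sketch of the plain ticker: callers tick once per operation and only run the expensive work when the countdown fires. The function names here are illustrative, not part of this header:

    static ticker_t decay_ticker;

    static void
    decay_ticker_setup(void) {
        ticker_init(&decay_ticker, 1000);  /* fire roughly every 1000 ticks */
    }

    static void
    on_operation(void) {
        if (ticker_tick(&decay_ticker)) {
            /* Fired: ticker_fixup() has already reset the countdown. */
            maybe_purge();  /* hypothetical expensive periodic work */
        }
    }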
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd.h deleted file mode 100644 index 66d6882..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd.h +++ /dev/null | |||
| @@ -1,518 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_TSD_H | ||
| 2 | #define JEMALLOC_INTERNAL_TSD_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/activity_callback.h" | ||
| 5 | #include "jemalloc/internal/arena_types.h" | ||
| 6 | #include "jemalloc/internal/assert.h" | ||
| 7 | #include "jemalloc/internal/bin_types.h" | ||
| 8 | #include "jemalloc/internal/jemalloc_internal_externs.h" | ||
| 9 | #include "jemalloc/internal/peak.h" | ||
| 10 | #include "jemalloc/internal/prof_types.h" | ||
| 11 | #include "jemalloc/internal/ql.h" | ||
| 12 | #include "jemalloc/internal/rtree_tsd.h" | ||
| 13 | #include "jemalloc/internal/tcache_types.h" | ||
| 14 | #include "jemalloc/internal/tcache_structs.h" | ||
| 15 | #include "jemalloc/internal/util.h" | ||
| 16 | #include "jemalloc/internal/witness.h" | ||
| 17 | |||
| 18 | /* | ||
| 19 | * Thread-Specific-Data layout | ||
| 20 | * | ||
| 21 | * At least some thread-local data gets touched on the fast-path of almost all | ||
| 22 | * malloc operations. But much of it is only necessary down slow-paths, or | ||
| 23 | * testing. We want to colocate the fast-path data so that it can live on the | ||
| 24 | * same cacheline if possible. So we define three tiers of hotness: | ||
| 25 | * TSD_DATA_FAST: Touched on the alloc/dalloc fast paths. | ||
| 26 | * TSD_DATA_SLOW: Touched down slow paths. "Slow" here is sort of general; | ||
| 27 | * there are "semi-slow" paths like "not a sized deallocation, but can still | ||
| 28 | * live in the tcache". We'll want to keep these closer to the fast-path | ||
| 29 | * data. | ||
| 30 | * TSD_DATA_SLOWER: Only touched in test or debug modes, or not touched at all. | ||
| 31 | * | ||
| 32 | * An additional concern is that the larger tcache bins won't be used (we have a | ||
| 33 | * bin per size class, but by default only cache relatively small objects). So | ||
| 34 | * the earlier bins are in the TSD_DATA_FAST tier, but the later ones are in the | ||
| 35 | * TSD_DATA_SLOWER tier. | ||
| 36 | * | ||
| 37 | * As a result of all this, we put the slow data first, then the fast data, then | ||
| 38 | * the slower data, while keeping the tcache as the last element of the fast | ||
| 39 | * data (so that the fast -> slower transition happens midway through the | ||
| 40 | * tcache). While we don't yet play alignment tricks to guarantee it, this | ||
| 41 | * increases our odds of getting some cache/page locality on fast paths. | ||
| 42 | */ | ||
| 43 | |||
| 44 | #ifdef JEMALLOC_JET | ||
| 45 | typedef void (*test_callback_t)(int *); | ||
| 46 | # define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10 | ||
| 47 | # define MALLOC_TEST_TSD \ | ||
| 48 | O(test_data, int, int) \ | ||
| 49 | O(test_callback, test_callback_t, int) | ||
| 50 | # define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL | ||
| 51 | #else | ||
| 52 | # define MALLOC_TEST_TSD | ||
| 53 | # define MALLOC_TEST_TSD_INITIALIZER | ||
| 54 | #endif | ||
| 55 | |||
| 56 | typedef ql_elm(tsd_t) tsd_link_t; | ||
| 57 | |||
| 58 | /* O(name, type, nullable type) */ | ||
| 59 | #define TSD_DATA_SLOW \ | ||
| 60 | O(tcache_enabled, bool, bool) \ | ||
| 61 | O(reentrancy_level, int8_t, int8_t) \ | ||
| 62 | O(thread_allocated_last_event, uint64_t, uint64_t) \ | ||
| 63 | O(thread_allocated_next_event, uint64_t, uint64_t) \ | ||
| 64 | O(thread_deallocated_last_event, uint64_t, uint64_t) \ | ||
| 65 | O(thread_deallocated_next_event, uint64_t, uint64_t) \ | ||
| 66 | O(tcache_gc_event_wait, uint64_t, uint64_t) \ | ||
| 67 | O(tcache_gc_dalloc_event_wait, uint64_t, uint64_t) \ | ||
| 68 | O(prof_sample_event_wait, uint64_t, uint64_t) \ | ||
| 69 | O(prof_sample_last_event, uint64_t, uint64_t) \ | ||
| 70 | O(stats_interval_event_wait, uint64_t, uint64_t) \ | ||
| 71 | O(stats_interval_last_event, uint64_t, uint64_t) \ | ||
| 72 | O(peak_alloc_event_wait, uint64_t, uint64_t) \ | ||
| 73 | O(peak_dalloc_event_wait, uint64_t, uint64_t) \ | ||
| 74 | O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \ | ||
| 75 | O(prng_state, uint64_t, uint64_t) \ | ||
| 76 | O(san_extents_until_guard_small, uint64_t, uint64_t) \ | ||
| 77 | O(san_extents_until_guard_large, uint64_t, uint64_t) \ | ||
| 78 | O(iarena, arena_t *, arena_t *) \ | ||
| 79 | O(arena, arena_t *, arena_t *) \ | ||
| 80 | O(arena_decay_ticker, ticker_geom_t, ticker_geom_t) \ | ||
| 81 | O(sec_shard, uint8_t, uint8_t) \ | ||
| 82 | O(binshards, tsd_binshards_t, tsd_binshards_t)\ | ||
| 83 | O(tsd_link, tsd_link_t, tsd_link_t) \ | ||
| 84 | O(in_hook, bool, bool) \ | ||
| 85 | O(peak, peak_t, peak_t) \ | ||
| 86 | O(activity_callback_thunk, activity_callback_thunk_t, \ | ||
| 87 | activity_callback_thunk_t) \ | ||
| 88 | O(tcache_slow, tcache_slow_t, tcache_slow_t) \ | ||
| 89 | O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) | ||
| 90 | |||
| 91 | #define TSD_DATA_SLOW_INITIALIZER \ | ||
| 92 | /* tcache_enabled */ TCACHE_ENABLED_ZERO_INITIALIZER, \ | ||
| 93 | /* reentrancy_level */ 0, \ | ||
| 94 | /* thread_allocated_last_event */ 0, \ | ||
| 95 | /* thread_allocated_next_event */ 0, \ | ||
| 96 | /* thread_deallocated_last_event */ 0, \ | ||
| 97 | /* thread_deallocated_next_event */ 0, \ | ||
| 98 | /* tcache_gc_event_wait */ 0, \ | ||
| 99 | /* tcache_gc_dalloc_event_wait */ 0, \ | ||
| 100 | /* prof_sample_event_wait */ 0, \ | ||
| 101 | /* prof_sample_last_event */ 0, \ | ||
| 102 | /* stats_interval_event_wait */ 0, \ | ||
| 103 | /* stats_interval_last_event */ 0, \ | ||
| 104 | /* peak_alloc_event_wait */ 0, \ | ||
| 105 | /* peak_dalloc_event_wait */ 0, \ | ||
| 106 | /* prof_tdata */ NULL, \ | ||
| 107 | /* prng_state */ 0, \ | ||
| 108 | /* san_extents_until_guard_small */ 0, \ | ||
| 109 | /* san_extents_until_guard_large */ 0, \ | ||
| 110 | /* iarena */ NULL, \ | ||
| 111 | /* arena */ NULL, \ | ||
| 112 | /* arena_decay_ticker */ \ | ||
| 113 | TICKER_GEOM_INIT(ARENA_DECAY_NTICKS_PER_UPDATE), \ | ||
| 114 | /* sec_shard */ (uint8_t)-1, \ | ||
| 115 | /* binshards */ TSD_BINSHARDS_ZERO_INITIALIZER, \ | ||
| 116 | /* tsd_link */ {NULL}, \ | ||
| 117 | /* in_hook */ false, \ | ||
| 118 | /* peak */ PEAK_INITIALIZER, \ | ||
| 119 | /* activity_callback_thunk */ \ | ||
| 120 | ACTIVITY_CALLBACK_THUNK_INITIALIZER, \ | ||
| 121 | /* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER, \ | ||
| 122 | /* rtree_ctx */ RTREE_CTX_INITIALIZER, | ||
| 123 | |||
| 124 | /* O(name, type, nullable type) */ | ||
| 125 | #define TSD_DATA_FAST \ | ||
| 126 | O(thread_allocated, uint64_t, uint64_t) \ | ||
| 127 | O(thread_allocated_next_event_fast, uint64_t, uint64_t) \ | ||
| 128 | O(thread_deallocated, uint64_t, uint64_t) \ | ||
| 129 | O(thread_deallocated_next_event_fast, uint64_t, uint64_t) \ | ||
| 130 | O(tcache, tcache_t, tcache_t) | ||
| 131 | |||
| 132 | #define TSD_DATA_FAST_INITIALIZER \ | ||
| 133 | /* thread_allocated */ 0, \ | ||
| 134 | /* thread_allocated_next_event_fast */ 0, \ | ||
| 135 | /* thread_deallocated */ 0, \ | ||
| 136 | /* thread_deallocated_next_event_fast */ 0, \ | ||
| 137 | /* tcache */ TCACHE_ZERO_INITIALIZER, | ||
| 138 | |||
| 139 | /* O(name, type, nullable type) */ | ||
| 140 | #define TSD_DATA_SLOWER \ | ||
| 141 | O(witness_tsd, witness_tsd_t, witness_tsdn_t) \ | ||
| 142 | MALLOC_TEST_TSD | ||
| 143 | |||
| 144 | #define TSD_DATA_SLOWER_INITIALIZER \ | ||
| 145 | /* witness */ WITNESS_TSD_INITIALIZER \ | ||
| 146 | /* test data */ MALLOC_TEST_TSD_INITIALIZER | ||
| 147 | |||
| 148 | |||
| 149 | #define TSD_INITIALIZER { \ | ||
| 150 | TSD_DATA_SLOW_INITIALIZER \ | ||
| 151 | /* state */ ATOMIC_INIT(tsd_state_uninitialized), \ | ||
| 152 | TSD_DATA_FAST_INITIALIZER \ | ||
| 153 | TSD_DATA_SLOWER_INITIALIZER \ | ||
| 154 | } | ||
| 155 | |||
| 156 | #if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) | ||
| 157 | void _malloc_tsd_cleanup_register(bool (*f)(void)); | ||
| 158 | #endif | ||
| 159 | |||
| 160 | void *malloc_tsd_malloc(size_t size); | ||
| 161 | void malloc_tsd_dalloc(void *wrapper); | ||
| 162 | tsd_t *malloc_tsd_boot0(void); | ||
| 163 | void malloc_tsd_boot1(void); | ||
| 164 | void tsd_cleanup(void *arg); | ||
| 165 | tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal); | ||
| 166 | void tsd_state_set(tsd_t *tsd, uint8_t new_state); | ||
| 167 | void tsd_slow_update(tsd_t *tsd); | ||
| 168 | void tsd_prefork(tsd_t *tsd); | ||
| 169 | void tsd_postfork_parent(tsd_t *tsd); | ||
| 170 | void tsd_postfork_child(tsd_t *tsd); | ||
| 171 | |||
| 172 | /* | ||
| 173 | * Call ..._inc when your module wants to take all threads down the slow paths, | ||
| 174 | * and ..._dec when it no longer needs to. | ||
| 175 | */ | ||
| 176 | void tsd_global_slow_inc(tsdn_t *tsdn); | ||
| 177 | void tsd_global_slow_dec(tsdn_t *tsdn); | ||
| 178 | bool tsd_global_slow(); | ||
| 179 | |||
| 180 | enum { | ||
| 181 | /* Common case --> jnz. */ | ||
| 182 | tsd_state_nominal = 0, | ||
| 183 | /* Initialized but on slow path. */ | ||
| 184 | tsd_state_nominal_slow = 1, | ||
| 185 | /* | ||
| 186 | * Some thread has changed global state in such a way that all nominal | ||
| 187 | * threads need to recompute their fast / slow status the next time they | ||
| 188 | * get a chance. | ||
| 189 | * | ||
| 190 | * Any thread can change another thread's status *to* recompute, but | ||
| 191 | * only the thread itself can change its own status *from* | ||
| 192 | * recompute. | ||
| 193 | */ | ||
| 194 | tsd_state_nominal_recompute = 2, | ||
| 195 | /* | ||
| 196 | * The above nominal states should be lower values. We use | ||
| 197 | * tsd_nominal_max to separate nominal states from threads in the | ||
| 198 | * process of being born / dying. | ||
| 199 | */ | ||
| 200 | tsd_state_nominal_max = 2, | ||
| 201 | |||
| 202 | /* | ||
| 203 | * A thread might free() during its death as its only allocator action; | ||
| 204 | * in such scenarios, we need tsd, but set up in such a way that no | ||
| 205 | * cleanup is necessary. | ||
| 206 | */ | ||
| 207 | tsd_state_minimal_initialized = 3, | ||
| 208 | /* States during which we know we're in thread death. */ | ||
| 209 | tsd_state_purgatory = 4, | ||
| 210 | tsd_state_reincarnated = 5, | ||
| 211 | /* | ||
| 212 | * What it says on the tin; tsd that hasn't been initialized. Note | ||
| 213 | * that even when the tsd struct lives in TLS, we need to keep track | ||
| 214 | * of stuff like whether or not our pthread destructors have been | ||
| 215 | * scheduled, so this really truly is different than the nominal state. | ||
| 216 | */ | ||
| 217 | tsd_state_uninitialized = 6 | ||
| 218 | }; | ||
| 219 | |||
| 220 | /* | ||
| 221 | * Some TSD accesses can only be done in a nominal state. To enforce this, we | ||
| 222 | * wrap TSD member access in a function that asserts on TSD state, and mangle | ||
| 223 | * field names to prevent touching them accidentally. | ||
| 224 | */ | ||
| 225 | #define TSD_MANGLE(n) cant_access_tsd_items_directly_use_a_getter_or_setter_##n | ||
| 226 | |||
| 227 | #ifdef JEMALLOC_U8_ATOMICS | ||
| 228 | # define tsd_state_t atomic_u8_t | ||
| 229 | # define tsd_atomic_load atomic_load_u8 | ||
| 230 | # define tsd_atomic_store atomic_store_u8 | ||
| 231 | # define tsd_atomic_exchange atomic_exchange_u8 | ||
| 232 | #else | ||
| 233 | # define tsd_state_t atomic_u32_t | ||
| 234 | # define tsd_atomic_load atomic_load_u32 | ||
| 235 | # define tsd_atomic_store atomic_store_u32 | ||
| 236 | # define tsd_atomic_exchange atomic_exchange_u32 | ||
| 237 | #endif | ||
| 238 | |||
| 239 | /* The actual tsd. */ | ||
| 240 | struct tsd_s { | ||
| 241 | /* | ||
| 242 | * The contents should be treated as totally opaque outside the tsd | ||
| 243 | * module. Access any thread-local state through the getters and | ||
| 244 | * setters below. | ||
| 245 | */ | ||
| 246 | |||
| 247 | #define O(n, t, nt) \ | ||
| 248 | t TSD_MANGLE(n); | ||
| 249 | |||
| 250 | TSD_DATA_SLOW | ||
| 251 | /* | ||
| 252 | * We manually limit the state to just a single byte, unless the 8-bit | ||
| 253 | * atomics are unavailable (which is rare). | ||
| 254 | */ | ||
| 255 | tsd_state_t state; | ||
| 256 | TSD_DATA_FAST | ||
| 257 | TSD_DATA_SLOWER | ||
| 258 | #undef O | ||
| 259 | }; | ||
| 260 | |||
| 261 | JEMALLOC_ALWAYS_INLINE uint8_t | ||
| 262 | tsd_state_get(tsd_t *tsd) { | ||
| 263 | /* | ||
| 264 | * This should be atomic. Unfortunately, compilers right now can't tell | ||
| 265 | * that this can be done as a memory comparison, and force a load into | ||
| 266 | * a register, which hurts fast-path performance. | ||
| 267 | */ | ||
| 268 | /* return atomic_load_u8(&tsd->state, ATOMIC_RELAXED); */ | ||
| 269 | return *(uint8_t *)&tsd->state; | ||
| 270 | } | ||
| 271 | |||
| 272 | /* | ||
| 273 | * Wrapper around tsd_t that makes it possible to avoid implicit conversion | ||
| 274 | * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be | ||
| 275 | * explicitly converted to tsd_t, which is non-nullable. | ||
| 276 | */ | ||
| 277 | struct tsdn_s { | ||
| 278 | tsd_t tsd; | ||
| 279 | }; | ||
| 280 | #define TSDN_NULL ((tsdn_t *)0) | ||
| 281 | JEMALLOC_ALWAYS_INLINE tsdn_t * | ||
| 282 | tsd_tsdn(tsd_t *tsd) { | ||
| 283 | return (tsdn_t *)tsd; | ||
| 284 | } | ||
| 285 | |||
| 286 | JEMALLOC_ALWAYS_INLINE bool | ||
| 287 | tsdn_null(const tsdn_t *tsdn) { | ||
| 288 | return tsdn == NULL; | ||
| 289 | } | ||
| 290 | |||
| 291 | JEMALLOC_ALWAYS_INLINE tsd_t * | ||
| 292 | tsdn_tsd(tsdn_t *tsdn) { | ||
| 293 | assert(!tsdn_null(tsdn)); | ||
| 294 | |||
| 295 | return &tsdn->tsd; | ||
| 296 | } | ||
| 297 | |||
| 298 | /* | ||
| 299 | * We put the platform-specific data declarations and inlines into their own | ||
| 300 | * header files to avoid cluttering this file. They define tsd_boot0, | ||
| 301 | * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set. | ||
| 302 | */ | ||
| 303 | #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP | ||
| 304 | #include "jemalloc/internal/tsd_malloc_thread_cleanup.h" | ||
| 305 | #elif (defined(JEMALLOC_TLS)) | ||
| 306 | #include "jemalloc/internal/tsd_tls.h" | ||
| 307 | #elif (defined(_WIN32)) | ||
| 308 | #include "jemalloc/internal/tsd_win.h" | ||
| 309 | #else | ||
| 310 | #include "jemalloc/internal/tsd_generic.h" | ||
| 311 | #endif | ||
| 312 | |||
| 313 | /* | ||
| 314 | * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of | ||
| 315 | * foo. This omits some safety checks, and so can be used during tsd | ||
| 316 | * initialization and cleanup. | ||
| 317 | */ | ||
| 318 | #define O(n, t, nt) \ | ||
| 319 | JEMALLOC_ALWAYS_INLINE t * \ | ||
| 320 | tsd_##n##p_get_unsafe(tsd_t *tsd) { \ | ||
| 321 | return &tsd->TSD_MANGLE(n); \ | ||
| 322 | } | ||
| 323 | TSD_DATA_SLOW | ||
| 324 | TSD_DATA_FAST | ||
| 325 | TSD_DATA_SLOWER | ||
| 326 | #undef O | ||
| 327 | |||
| 328 | /* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */ | ||
| 329 | #define O(n, t, nt) \ | ||
| 330 | JEMALLOC_ALWAYS_INLINE t * \ | ||
| 331 | tsd_##n##p_get(tsd_t *tsd) { \ | ||
| 332 | /* \ | ||
| 333 | * Because the state might change asynchronously if it's \ | ||
| 334 | * nominal, we need to make sure that we only read it once. \ | ||
| 335 | */ \ | ||
| 336 | uint8_t state = tsd_state_get(tsd); \ | ||
| 337 | assert(state == tsd_state_nominal || \ | ||
| 338 | state == tsd_state_nominal_slow || \ | ||
| 339 | state == tsd_state_nominal_recompute || \ | ||
| 340 | state == tsd_state_reincarnated || \ | ||
| 341 | state == tsd_state_minimal_initialized); \ | ||
| 342 | return tsd_##n##p_get_unsafe(tsd); \ | ||
| 343 | } | ||
| 344 | TSD_DATA_SLOW | ||
| 345 | TSD_DATA_FAST | ||
| 346 | TSD_DATA_SLOWER | ||
| 347 | #undef O | ||
| 348 | |||
| 349 | /* | ||
| 350 | * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn | ||
| 351 | * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type. | ||
| 352 | */ | ||
| 353 | #define O(n, t, nt) \ | ||
| 354 | JEMALLOC_ALWAYS_INLINE nt * \ | ||
| 355 | tsdn_##n##p_get(tsdn_t *tsdn) { \ | ||
| 356 | if (tsdn_null(tsdn)) { \ | ||
| 357 | return NULL; \ | ||
| 358 | } \ | ||
| 359 | tsd_t *tsd = tsdn_tsd(tsdn); \ | ||
| 360 | return (nt *)tsd_##n##p_get(tsd); \ | ||
| 361 | } | ||
| 362 | TSD_DATA_SLOW | ||
| 363 | TSD_DATA_FAST | ||
| 364 | TSD_DATA_SLOWER | ||
| 365 | #undef O | ||
| 366 | |||
| 367 | /* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */ | ||
| 368 | #define O(n, t, nt) \ | ||
| 369 | JEMALLOC_ALWAYS_INLINE t \ | ||
| 370 | tsd_##n##_get(tsd_t *tsd) { \ | ||
| 371 | return *tsd_##n##p_get(tsd); \ | ||
| 372 | } | ||
| 373 | TSD_DATA_SLOW | ||
| 374 | TSD_DATA_FAST | ||
| 375 | TSD_DATA_SLOWER | ||
| 376 | #undef O | ||
| 377 | |||
| 378 | /* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */ | ||
| 379 | #define O(n, t, nt) \ | ||
| 380 | JEMALLOC_ALWAYS_INLINE void \ | ||
| 381 | tsd_##n##_set(tsd_t *tsd, t val) { \ | ||
| 382 | assert(tsd_state_get(tsd) != tsd_state_reincarnated && \ | ||
| 383 | tsd_state_get(tsd) != tsd_state_minimal_initialized); \ | ||
| 384 | *tsd_##n##p_get(tsd) = val; \ | ||
| 385 | } | ||
| 386 | TSD_DATA_SLOW | ||
| 387 | TSD_DATA_FAST | ||
| 388 | TSD_DATA_SLOWER | ||
| 389 | #undef O | ||
| 390 | |||
| 391 | JEMALLOC_ALWAYS_INLINE void | ||
| 392 | tsd_assert_fast(tsd_t *tsd) { | ||
| 393 | /* | ||
| 394 | * Note that our fastness assertion does *not* include global slowness | ||
| 395 | * counters; it's not in general possible to ensure that they won't | ||
| 396 | * change asynchronously from underneath us. | ||
| 397 | */ | ||
| 398 | assert(!malloc_slow && tsd_tcache_enabled_get(tsd) && | ||
| 399 | tsd_reentrancy_level_get(tsd) == 0); | ||
| 400 | } | ||
| 401 | |||
| 402 | JEMALLOC_ALWAYS_INLINE bool | ||
| 403 | tsd_fast(tsd_t *tsd) { | ||
| 404 | bool fast = (tsd_state_get(tsd) == tsd_state_nominal); | ||
| 405 | if (fast) { | ||
| 406 | tsd_assert_fast(tsd); | ||
| 407 | } | ||
| 408 | |||
| 409 | return fast; | ||
| 410 | } | ||
| 411 | |||
| 412 | JEMALLOC_ALWAYS_INLINE tsd_t * | ||
| 413 | tsd_fetch_impl(bool init, bool minimal) { | ||
| 414 | tsd_t *tsd = tsd_get(init); | ||
| 415 | |||
| 416 | if (!init && tsd_get_allocates() && tsd == NULL) { | ||
| 417 | return NULL; | ||
| 418 | } | ||
| 419 | assert(tsd != NULL); | ||
| 420 | |||
| 421 | if (unlikely(tsd_state_get(tsd) != tsd_state_nominal)) { | ||
| 422 | return tsd_fetch_slow(tsd, minimal); | ||
| 423 | } | ||
| 424 | assert(tsd_fast(tsd)); | ||
| 425 | tsd_assert_fast(tsd); | ||
| 426 | |||
| 427 | return tsd; | ||
| 428 | } | ||
| 429 | |||
| 430 | /* Get a minimal TSD that requires no cleanup. See comments in free(). */ | ||
| 431 | JEMALLOC_ALWAYS_INLINE tsd_t * | ||
| 432 | tsd_fetch_min(void) { | ||
| 433 | return tsd_fetch_impl(true, true); | ||
| 434 | } | ||
| 435 | |||
| 436 | /* For internal background threads use only. */ | ||
| 437 | JEMALLOC_ALWAYS_INLINE tsd_t * | ||
| 438 | tsd_internal_fetch(void) { | ||
| 439 | tsd_t *tsd = tsd_fetch_min(); | ||
| 440 | /* Use reincarnated state to prevent full initialization. */ | ||
| 441 | tsd_state_set(tsd, tsd_state_reincarnated); | ||
| 442 | |||
| 443 | return tsd; | ||
| 444 | } | ||
| 445 | |||
| 446 | JEMALLOC_ALWAYS_INLINE tsd_t * | ||
| 447 | tsd_fetch(void) { | ||
| 448 | return tsd_fetch_impl(true, false); | ||
| 449 | } | ||
| 450 | |||
| 451 | static inline bool | ||
| 452 | tsd_nominal(tsd_t *tsd) { | ||
| 453 | bool nominal = tsd_state_get(tsd) <= tsd_state_nominal_max; | ||
| 454 | assert(nominal || tsd_reentrancy_level_get(tsd) > 0); | ||
| 455 | |||
| 456 | return nominal; | ||
| 457 | } | ||
| 458 | |||
| 459 | JEMALLOC_ALWAYS_INLINE tsdn_t * | ||
| 460 | tsdn_fetch(void) { | ||
| 461 | if (!tsd_booted_get()) { | ||
| 462 | return NULL; | ||
| 463 | } | ||
| 464 | |||
| 465 | return tsd_tsdn(tsd_fetch_impl(false, false)); | ||
| 466 | } | ||
| 467 | |||
| 468 | JEMALLOC_ALWAYS_INLINE rtree_ctx_t * | ||
| 469 | tsd_rtree_ctx(tsd_t *tsd) { | ||
| 470 | return tsd_rtree_ctxp_get(tsd); | ||
| 471 | } | ||
| 472 | |||
| 473 | JEMALLOC_ALWAYS_INLINE rtree_ctx_t * | ||
| 474 | tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) { | ||
| 475 | /* | ||
| 476 | * If tsd cannot be accessed, initialize the fallback rtree_ctx and | ||
| 477 | * return a pointer to it. | ||
| 478 | */ | ||
| 479 | if (unlikely(tsdn_null(tsdn))) { | ||
| 480 | rtree_ctx_data_init(fallback); | ||
| 481 | return fallback; | ||
| 482 | } | ||
| 483 | return tsd_rtree_ctx(tsdn_tsd(tsdn)); | ||
| 484 | } | ||
| 485 | |||
| 486 | static inline bool | ||
| 487 | tsd_state_nocleanup(tsd_t *tsd) { | ||
| 488 | return tsd_state_get(tsd) == tsd_state_reincarnated || | ||
| 489 | tsd_state_get(tsd) == tsd_state_minimal_initialized; | ||
| 490 | } | ||
| 491 | |||
| 492 | /* | ||
| 493 | * These "raw" tsd reentrancy functions don't have any debug checking to make | ||
| 494 | * sure that we're not touching arena 0. It is better to call pre_reentrancy | ||
| 495 | * and post_reentrancy when this is possible. | ||
| 496 | */ | ||
| 497 | static inline void | ||
| 498 | tsd_pre_reentrancy_raw(tsd_t *tsd) { | ||
| 499 | bool fast = tsd_fast(tsd); | ||
| 500 | assert(tsd_reentrancy_level_get(tsd) < INT8_MAX); | ||
| 501 | ++*tsd_reentrancy_levelp_get(tsd); | ||
| 502 | if (fast) { | ||
| 503 | /* Prepare slow path for reentrancy. */ | ||
| 504 | tsd_slow_update(tsd); | ||
| 505 | assert(tsd_state_get(tsd) == tsd_state_nominal_slow); | ||
| 506 | } | ||
| 507 | } | ||
| 508 | |||
| 509 | static inline void | ||
| 510 | tsd_post_reentrancy_raw(tsd_t *tsd) { | ||
| 511 | int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd); | ||
| 512 | assert(*reentrancy_level > 0); | ||
| 513 | if (--*reentrancy_level == 0) { | ||
| 514 | tsd_slow_update(tsd); | ||
| 515 | } | ||
| 516 | } | ||
| 517 | |||
| 518 | #endif /* JEMALLOC_INTERNAL_TSD_H */ | ||
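Most of this header is generated from the `TSD_DATA_*` field lists through the `O(name, type, nullable type)` X-macro: one expansion of the list declares the struct members, later expansions emit the `tsd_*_get`/`tsd_*_set` accessors, so fields and accessors cannot drift apart. Below is a minimal standalone sketch of that pattern with a two-field list; `DEMO_FIELDS` and all `demo_*` names are hypothetical, and the real header layers state assertions and the fast/slow tier ordering on top of the same expansion trick.

```c
/* X-macro sketch: one field list drives both the struct and its accessors. */
#include <stdint.h>
#include <stdio.h>

/* O(name, type) -- the field list, analogous to TSD_DATA_FAST/SLOW above. */
#define DEMO_FIELDS \
    O(thread_allocated, uint64_t) \
    O(reentrancy_level, int8_t)

typedef struct {
#define O(n, t) t n;
	DEMO_FIELDS
#undef O
} demo_tsd_t;

/* Generate demo_<field>_get() and demo_<field>_set() for every field. */
#define O(n, t) \
	static inline t demo_##n##_get(const demo_tsd_t *tsd) { \
		return tsd->n; \
	} \
	static inline void demo_##n##_set(demo_tsd_t *tsd, t val) { \
		tsd->n = val; \
	}
DEMO_FIELDS
#undef O

int
main(void) {
	demo_tsd_t tsd = {0};
	demo_thread_allocated_set(&tsd, 4096);
	demo_reentrancy_level_set(&tsd, 1);
	printf("allocated=%llu level=%d\n",
	    (unsigned long long)demo_thread_allocated_get(&tsd),
	    (int)demo_reentrancy_level_get(&tsd));
	return 0;
}
```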
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_generic.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_generic.h deleted file mode 100644 index a718472..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_generic.h +++ /dev/null | |||
| @@ -1,182 +0,0 @@ | |||
| 1 | #ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H | ||
| 2 | #error This file should be included only once, by tsd.h. | ||
| 3 | #endif | ||
| 4 | #define JEMALLOC_INTERNAL_TSD_GENERIC_H | ||
| 5 | |||
| 6 | typedef struct tsd_init_block_s tsd_init_block_t; | ||
| 7 | struct tsd_init_block_s { | ||
| 8 | ql_elm(tsd_init_block_t) link; | ||
| 9 | pthread_t thread; | ||
| 10 | void *data; | ||
| 11 | }; | ||
| 12 | |||
| 13 | /* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */ | ||
| 14 | typedef struct tsd_init_head_s tsd_init_head_t; | ||
| 15 | |||
| 16 | typedef struct { | ||
| 17 | bool initialized; | ||
| 18 | tsd_t val; | ||
| 19 | } tsd_wrapper_t; | ||
| 20 | |||
| 21 | void *tsd_init_check_recursion(tsd_init_head_t *head, | ||
| 22 | tsd_init_block_t *block); | ||
| 23 | void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); | ||
| 24 | |||
| 25 | extern pthread_key_t tsd_tsd; | ||
| 26 | extern tsd_init_head_t tsd_init_head; | ||
| 27 | extern tsd_wrapper_t tsd_boot_wrapper; | ||
| 28 | extern bool tsd_booted; | ||
| 29 | |||
| 30 | /* Initialization/cleanup. */ | ||
| 31 | JEMALLOC_ALWAYS_INLINE void | ||
| 32 | tsd_cleanup_wrapper(void *arg) { | ||
| 33 | tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg; | ||
| 34 | |||
| 35 | if (wrapper->initialized) { | ||
| 36 | wrapper->initialized = false; | ||
| 37 | tsd_cleanup(&wrapper->val); | ||
| 38 | if (wrapper->initialized) { | ||
| 39 | /* Trigger another cleanup round. */ | ||
| 40 | if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) | ||
| 41 | { | ||
| 42 | malloc_write("<jemalloc>: Error setting TSD\n"); | ||
| 43 | if (opt_abort) { | ||
| 44 | abort(); | ||
| 45 | } | ||
| 46 | } | ||
| 47 | return; | ||
| 48 | } | ||
| 49 | } | ||
| 50 | malloc_tsd_dalloc(wrapper); | ||
| 51 | } | ||
| 52 | |||
| 53 | JEMALLOC_ALWAYS_INLINE void | ||
| 54 | tsd_wrapper_set(tsd_wrapper_t *wrapper) { | ||
| 55 | if (unlikely(!tsd_booted)) { | ||
| 56 | return; | ||
| 57 | } | ||
| 58 | if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) { | ||
| 59 | malloc_write("<jemalloc>: Error setting TSD\n"); | ||
| 60 | abort(); | ||
| 61 | } | ||
| 62 | } | ||
| 63 | |||
| 64 | JEMALLOC_ALWAYS_INLINE tsd_wrapper_t * | ||
| 65 | tsd_wrapper_get(bool init) { | ||
| 66 | tsd_wrapper_t *wrapper; | ||
| 67 | |||
| 68 | if (unlikely(!tsd_booted)) { | ||
| 69 | return &tsd_boot_wrapper; | ||
| 70 | } | ||
| 71 | |||
| 72 | wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd); | ||
| 73 | |||
| 74 | if (init && unlikely(wrapper == NULL)) { | ||
| 75 | tsd_init_block_t block; | ||
| 76 | wrapper = (tsd_wrapper_t *) | ||
| 77 | tsd_init_check_recursion(&tsd_init_head, &block); | ||
| 78 | if (wrapper) { | ||
| 79 | return wrapper; | ||
| 80 | } | ||
| 81 | wrapper = (tsd_wrapper_t *) | ||
| 82 | malloc_tsd_malloc(sizeof(tsd_wrapper_t)); | ||
| 83 | block.data = (void *)wrapper; | ||
| 84 | if (wrapper == NULL) { | ||
| 85 | malloc_write("<jemalloc>: Error allocating TSD\n"); | ||
| 86 | abort(); | ||
| 87 | } else { | ||
| 88 | wrapper->initialized = false; | ||
| 89 | JEMALLOC_DIAGNOSTIC_PUSH | ||
| 90 | JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS | ||
| 91 | tsd_t initializer = TSD_INITIALIZER; | ||
| 92 | JEMALLOC_DIAGNOSTIC_POP | ||
| 93 | wrapper->val = initializer; | ||
| 94 | } | ||
| 95 | tsd_wrapper_set(wrapper); | ||
| 96 | tsd_init_finish(&tsd_init_head, &block); | ||
| 97 | } | ||
| 98 | return wrapper; | ||
| 99 | } | ||
| 100 | |||
| 101 | JEMALLOC_ALWAYS_INLINE bool | ||
| 102 | tsd_boot0(void) { | ||
| 103 | tsd_wrapper_t *wrapper; | ||
| 104 | tsd_init_block_t block; | ||
| 105 | |||
| 106 | wrapper = (tsd_wrapper_t *) | ||
| 107 | tsd_init_check_recursion(&tsd_init_head, &block); | ||
| 108 | if (wrapper) { | ||
| 109 | return false; | ||
| 110 | } | ||
| 111 | block.data = &tsd_boot_wrapper; | ||
| 112 | if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) { | ||
| 113 | return true; | ||
| 114 | } | ||
| 115 | tsd_booted = true; | ||
| 116 | tsd_wrapper_set(&tsd_boot_wrapper); | ||
| 117 | tsd_init_finish(&tsd_init_head, &block); | ||
| 118 | return false; | ||
| 119 | } | ||
| 120 | |||
| 121 | JEMALLOC_ALWAYS_INLINE void | ||
| 122 | tsd_boot1(void) { | ||
| 123 | tsd_wrapper_t *wrapper; | ||
| 124 | wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t)); | ||
| 125 | if (wrapper == NULL) { | ||
| 126 | malloc_write("<jemalloc>: Error allocating TSD\n"); | ||
| 127 | abort(); | ||
| 128 | } | ||
| 129 | tsd_boot_wrapper.initialized = false; | ||
| 130 | tsd_cleanup(&tsd_boot_wrapper.val); | ||
| 131 | wrapper->initialized = false; | ||
| 132 | JEMALLOC_DIAGNOSTIC_PUSH | ||
| 133 | JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS | ||
| 134 | tsd_t initializer = TSD_INITIALIZER; | ||
| 135 | JEMALLOC_DIAGNOSTIC_POP | ||
| 136 | wrapper->val = initializer; | ||
| 137 | tsd_wrapper_set(wrapper); | ||
| 138 | } | ||
| 139 | |||
| 140 | JEMALLOC_ALWAYS_INLINE bool | ||
| 141 | tsd_boot(void) { | ||
| 142 | if (tsd_boot0()) { | ||
| 143 | return true; | ||
| 144 | } | ||
| 145 | tsd_boot1(); | ||
| 146 | return false; | ||
| 147 | } | ||
| 148 | |||
| 149 | JEMALLOC_ALWAYS_INLINE bool | ||
| 150 | tsd_booted_get(void) { | ||
| 151 | return tsd_booted; | ||
| 152 | } | ||
| 153 | |||
| 154 | JEMALLOC_ALWAYS_INLINE bool | ||
| 155 | tsd_get_allocates(void) { | ||
| 156 | return true; | ||
| 157 | } | ||
| 158 | |||
| 159 | /* Get/set. */ | ||
| 160 | JEMALLOC_ALWAYS_INLINE tsd_t * | ||
| 161 | tsd_get(bool init) { | ||
| 162 | tsd_wrapper_t *wrapper; | ||
| 163 | |||
| 164 | assert(tsd_booted); | ||
| 165 | wrapper = tsd_wrapper_get(init); | ||
| 166 | if (tsd_get_allocates() && !init && wrapper == NULL) { | ||
| 167 | return NULL; | ||
| 168 | } | ||
| 169 | return &wrapper->val; | ||
| 170 | } | ||
| 171 | |||
| 172 | JEMALLOC_ALWAYS_INLINE void | ||
| 173 | tsd_set(tsd_t *val) { | ||
| 174 | tsd_wrapper_t *wrapper; | ||
| 175 | |||
| 176 | assert(tsd_booted); | ||
| 177 | wrapper = tsd_wrapper_get(true); | ||
| 178 | if (likely(&wrapper->val != val)) { | ||
| 179 | wrapper->val = *(val); | ||
| 180 | } | ||
| 181 | wrapper->initialized = true; | ||
| 182 | } | ||
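This fallback keeps per-thread state behind a `pthread_key_t`: the first access on a thread heap-allocates a wrapper, stores it with `pthread_setspecific()`, and the destructor registered at key creation reclaims it at thread exit. Below is a minimal standalone sketch of that lazy-allocation pattern, with error handling trimmed and all `demo_*` names hypothetical; the real code additionally guards against reentrant initialization via `tsd_init_check_recursion` and must use its own `malloc_tsd_malloc` rather than calling `malloc` from inside the allocator.

```c
/* Lazy per-thread state via pthread_key_t, freed by the key's destructor. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	unsigned long allocations;	/* Stand-in for real per-thread state. */
} demo_tsd_t;

static pthread_key_t demo_key;
static pthread_once_t demo_once = PTHREAD_ONCE_INIT;

static void
demo_destructor(void *arg) {
	free(arg);	/* Runs automatically at thread exit. */
}

static void
demo_key_init(void) {
	(void)pthread_key_create(&demo_key, demo_destructor);
}

static demo_tsd_t *
demo_tsd_get(void) {
	pthread_once(&demo_once, demo_key_init);
	demo_tsd_t *tsd = pthread_getspecific(demo_key);
	if (tsd == NULL) {	/* Lazily initialize on first use. */
		tsd = calloc(1, sizeof(*tsd));
		if (tsd == NULL) {
			abort();
		}
		pthread_setspecific(demo_key, tsd);
	}
	return tsd;
}

static void *
demo_thread(void *unused) {
	(void)unused;
	for (int i = 0; i < 1000; i++) {
		demo_tsd_get()->allocations++;
	}
	printf("thread saw %lu allocations\n", demo_tsd_get()->allocations);
	return NULL;
}

int
main(void) {
	pthread_t a, b;
	pthread_create(&a, NULL, demo_thread, NULL);
	pthread_create(&b, NULL, demo_thread, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
```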
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h deleted file mode 100644 index d8f3ef1..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h +++ /dev/null | |||
| @@ -1,61 +0,0 @@ | |||
| 1 | #ifdef JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H | ||
| 2 | #error This file should be included only once, by tsd.h. | ||
| 3 | #endif | ||
| 4 | #define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H | ||
| 5 | |||
| 6 | #define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL | ||
| 7 | |||
| 8 | extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls; | ||
| 9 | extern JEMALLOC_TSD_TYPE_ATTR(bool) tsd_initialized; | ||
| 10 | extern bool tsd_booted; | ||
| 11 | |||
| 12 | /* Initialization/cleanup. */ | ||
| 13 | JEMALLOC_ALWAYS_INLINE bool | ||
| 14 | tsd_cleanup_wrapper(void) { | ||
| 15 | if (tsd_initialized) { | ||
| 16 | tsd_initialized = false; | ||
| 17 | tsd_cleanup(&tsd_tls); | ||
| 18 | } | ||
| 19 | return tsd_initialized; | ||
| 20 | } | ||
| 21 | |||
| 22 | JEMALLOC_ALWAYS_INLINE bool | ||
| 23 | tsd_boot0(void) { | ||
| 24 | _malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); | ||
| 25 | tsd_booted = true; | ||
| 26 | return false; | ||
| 27 | } | ||
| 28 | |||
| 29 | JEMALLOC_ALWAYS_INLINE void | ||
| 30 | tsd_boot1(void) { | ||
| 31 | /* Do nothing. */ | ||
| 32 | } | ||
| 33 | |||
| 34 | JEMALLOC_ALWAYS_INLINE bool | ||
| 35 | tsd_boot(void) { | ||
| 36 | return tsd_boot0(); | ||
| 37 | } | ||
| 38 | |||
| 39 | JEMALLOC_ALWAYS_INLINE bool | ||
| 40 | tsd_booted_get(void) { | ||
| 41 | return tsd_booted; | ||
| 42 | } | ||
| 43 | |||
| 44 | JEMALLOC_ALWAYS_INLINE bool | ||
| 45 | tsd_get_allocates(void) { | ||
| 46 | return false; | ||
| 47 | } | ||
| 48 | |||
| 49 | /* Get/set. */ | ||
| 50 | JEMALLOC_ALWAYS_INLINE tsd_t * | ||
| 51 | tsd_get(bool init) { | ||
| 52 | return &tsd_tls; | ||
| 53 | } | ||
| 54 | JEMALLOC_ALWAYS_INLINE void | ||
| 55 | tsd_set(tsd_t *val) { | ||
| 56 | assert(tsd_booted); | ||
| 57 | if (likely(&tsd_tls != val)) { | ||
| 58 | tsd_tls = (*val); | ||
| 59 | } | ||
| 60 | tsd_initialized = true; | ||
| 61 | } | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_tls.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_tls.h deleted file mode 100644 index 7d6c805..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_tls.h +++ /dev/null | |||
| @@ -1,60 +0,0 @@ | |||
| 1 | #ifdef JEMALLOC_INTERNAL_TSD_TLS_H | ||
| 2 | #error This file should be included only once, by tsd.h. | ||
| 3 | #endif | ||
| 4 | #define JEMALLOC_INTERNAL_TSD_TLS_H | ||
| 5 | |||
| 6 | #define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL | ||
| 7 | |||
| 8 | extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls; | ||
| 9 | extern pthread_key_t tsd_tsd; | ||
| 10 | extern bool tsd_booted; | ||
| 11 | |||
| 12 | /* Initialization/cleanup. */ | ||
| 13 | JEMALLOC_ALWAYS_INLINE bool | ||
| 14 | tsd_boot0(void) { | ||
| 15 | if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) { | ||
| 16 | return true; | ||
| 17 | } | ||
| 18 | tsd_booted = true; | ||
| 19 | return false; | ||
| 20 | } | ||
| 21 | |||
| 22 | JEMALLOC_ALWAYS_INLINE void | ||
| 23 | tsd_boot1(void) { | ||
| 24 | /* Do nothing. */ | ||
| 25 | } | ||
| 26 | |||
| 27 | JEMALLOC_ALWAYS_INLINE bool | ||
| 28 | tsd_boot(void) { | ||
| 29 | return tsd_boot0(); | ||
| 30 | } | ||
| 31 | |||
| 32 | JEMALLOC_ALWAYS_INLINE bool | ||
| 33 | tsd_booted_get(void) { | ||
| 34 | return tsd_booted; | ||
| 35 | } | ||
| 36 | |||
| 37 | JEMALLOC_ALWAYS_INLINE bool | ||
| 38 | tsd_get_allocates(void) { | ||
| 39 | return false; | ||
| 40 | } | ||
| 41 | |||
| 42 | /* Get/set. */ | ||
| 43 | JEMALLOC_ALWAYS_INLINE tsd_t * | ||
| 44 | tsd_get(bool init) { | ||
| 45 | return &tsd_tls; | ||
| 46 | } | ||
| 47 | |||
| 48 | JEMALLOC_ALWAYS_INLINE void | ||
| 49 | tsd_set(tsd_t *val) { | ||
| 50 | assert(tsd_booted); | ||
| 51 | if (likely(&tsd_tls != val)) { | ||
| 52 | tsd_tls = (*val); | ||
| 53 | } | ||
| 54 | if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) { | ||
| 55 | malloc_write("<jemalloc>: Error setting tsd.\n"); | ||
| 56 | if (opt_abort) { | ||
| 57 | abort(); | ||
| 58 | } | ||
| 59 | } | ||
| 60 | } | ||
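The TLS variant above keeps the data itself in a fast `__thread` variable and uses the pthread key only so that its destructor runs at thread exit; the value stored in the key is just a pointer used to arm that callback, so the hot path never touches `pthread_getspecific`. Below is a minimal standalone sketch of the same trick, assuming GCC/Clang-style `__thread`; all `demo_*` names are hypothetical.

```c
/* Fast __thread storage, with a pthread key used only for the destructor. */
#include <pthread.h>
#include <stdio.h>

static __thread unsigned long demo_counter;	/* Fast TLS storage. */
static pthread_key_t demo_cleanup_key;

static void
demo_cleanup(void *arg) {
	(void)arg;
	/* Runs at thread exit, in the exiting thread's context. */
	printf("thread exiting, counter=%lu\n", demo_counter);
}

static void *
demo_thread(void *unused) {
	(void)unused;
	/* Arm the destructor by storing any non-NULL value in the key. */
	pthread_setspecific(demo_cleanup_key, (void *)1);
	for (int i = 0; i < 12345; i++) {
		demo_counter++;	/* No pthread_getspecific on this path. */
	}
	return NULL;
}

int
main(void) {
	pthread_t t;
	pthread_key_create(&demo_cleanup_key, demo_cleanup);
	pthread_create(&t, NULL, demo_thread, NULL);
	pthread_join(t, NULL);
	return 0;
}
```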
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_types.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_types.h deleted file mode 100644 index a6ae37d..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_types.h +++ /dev/null | |||
| @@ -1,10 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_TSD_TYPES_H | ||
| 2 | #define JEMALLOC_INTERNAL_TSD_TYPES_H | ||
| 3 | |||
| 4 | #define MALLOC_TSD_CLEANUPS_MAX 4 | ||
| 5 | |||
| 6 | typedef struct tsd_s tsd_t; | ||
| 7 | typedef struct tsdn_s tsdn_t; | ||
| 8 | typedef bool (*malloc_tsd_cleanup_t)(void); | ||
| 9 | |||
| 10 | #endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */ | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_win.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_win.h deleted file mode 100644 index a91dac8..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/tsd_win.h +++ /dev/null | |||
| @@ -1,139 +0,0 @@ | |||
| 1 | #ifdef JEMALLOC_INTERNAL_TSD_WIN_H | ||
| 2 | #error This file should be included only once, by tsd.h. | ||
| 3 | #endif | ||
| 4 | #define JEMALLOC_INTERNAL_TSD_WIN_H | ||
| 5 | |||
| 6 | typedef struct { | ||
| 7 | bool initialized; | ||
| 8 | tsd_t val; | ||
| 9 | } tsd_wrapper_t; | ||
| 10 | |||
| 11 | extern DWORD tsd_tsd; | ||
| 12 | extern tsd_wrapper_t tsd_boot_wrapper; | ||
| 13 | extern bool tsd_booted; | ||
| 14 | |||
| 15 | /* Initialization/cleanup. */ | ||
| 16 | JEMALLOC_ALWAYS_INLINE bool | ||
| 17 | tsd_cleanup_wrapper(void) { | ||
| 18 | DWORD error = GetLastError(); | ||
| 19 | tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd); | ||
| 20 | SetLastError(error); | ||
| 21 | |||
| 22 | if (wrapper == NULL) { | ||
| 23 | return false; | ||
| 24 | } | ||
| 25 | |||
| 26 | if (wrapper->initialized) { | ||
| 27 | wrapper->initialized = false; | ||
| 28 | tsd_cleanup(&wrapper->val); | ||
| 29 | if (wrapper->initialized) { | ||
| 30 | /* Trigger another cleanup round. */ | ||
| 31 | return true; | ||
| 32 | } | ||
| 33 | } | ||
| 34 | malloc_tsd_dalloc(wrapper); | ||
| 35 | return false; | ||
| 36 | } | ||
| 37 | |||
| 38 | JEMALLOC_ALWAYS_INLINE void | ||
| 39 | tsd_wrapper_set(tsd_wrapper_t *wrapper) { | ||
| 40 | if (!TlsSetValue(tsd_tsd, (void *)wrapper)) { | ||
| 41 | malloc_write("<jemalloc>: Error setting TSD\n"); | ||
| 42 | abort(); | ||
| 43 | } | ||
| 44 | } | ||
| 45 | |||
| 46 | JEMALLOC_ALWAYS_INLINE tsd_wrapper_t * | ||
| 47 | tsd_wrapper_get(bool init) { | ||
| 48 | DWORD error = GetLastError(); | ||
| 49 | tsd_wrapper_t *wrapper = (tsd_wrapper_t *) TlsGetValue(tsd_tsd); | ||
| 50 | SetLastError(error); | ||
| 51 | |||
| 52 | if (init && unlikely(wrapper == NULL)) { | ||
| 53 | wrapper = (tsd_wrapper_t *) | ||
| 54 | malloc_tsd_malloc(sizeof(tsd_wrapper_t)); | ||
| 55 | if (wrapper == NULL) { | ||
| 56 | malloc_write("<jemalloc>: Error allocating TSD\n"); | ||
| 57 | abort(); | ||
| 58 | } else { | ||
| 59 | wrapper->initialized = false; | ||
| 60 | /* MSVC is finicky about aggregate initialization. */ | ||
| 61 | tsd_t tsd_initializer = TSD_INITIALIZER; | ||
| 62 | wrapper->val = tsd_initializer; | ||
| 63 | } | ||
| 64 | tsd_wrapper_set(wrapper); | ||
| 65 | } | ||
| 66 | return wrapper; | ||
| 67 | } | ||
| 68 | |||
| 69 | JEMALLOC_ALWAYS_INLINE bool | ||
| 70 | tsd_boot0(void) { | ||
| 71 | tsd_tsd = TlsAlloc(); | ||
| 72 | if (tsd_tsd == TLS_OUT_OF_INDEXES) { | ||
| 73 | return true; | ||
| 74 | } | ||
| 75 | _malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); | ||
| 76 | tsd_wrapper_set(&tsd_boot_wrapper); | ||
| 77 | tsd_booted = true; | ||
| 78 | return false; | ||
| 79 | } | ||
| 80 | |||
| 81 | JEMALLOC_ALWAYS_INLINE void | ||
| 82 | tsd_boot1(void) { | ||
| 83 | tsd_wrapper_t *wrapper; | ||
| 84 | wrapper = (tsd_wrapper_t *) | ||
| 85 | malloc_tsd_malloc(sizeof(tsd_wrapper_t)); | ||
| 86 | if (wrapper == NULL) { | ||
| 87 | malloc_write("<jemalloc>: Error allocating TSD\n"); | ||
| 88 | abort(); | ||
| 89 | } | ||
| 90 | tsd_boot_wrapper.initialized = false; | ||
| 91 | tsd_cleanup(&tsd_boot_wrapper.val); | ||
| 92 | wrapper->initialized = false; | ||
| 93 | tsd_t initializer = TSD_INITIALIZER; | ||
| 94 | wrapper->val = initializer; | ||
| 95 | tsd_wrapper_set(wrapper); | ||
| 96 | } | ||
| 97 | JEMALLOC_ALWAYS_INLINE bool | ||
| 98 | tsd_boot(void) { | ||
| 99 | if (tsd_boot0()) { | ||
| 100 | return true; | ||
| 101 | } | ||
| 102 | tsd_boot1(); | ||
| 103 | return false; | ||
| 104 | } | ||
| 105 | |||
| 106 | JEMALLOC_ALWAYS_INLINE bool | ||
| 107 | tsd_booted_get(void) { | ||
| 108 | return tsd_booted; | ||
| 109 | } | ||
| 110 | |||
| 111 | JEMALLOC_ALWAYS_INLINE bool | ||
| 112 | tsd_get_allocates(void) { | ||
| 113 | return true; | ||
| 114 | } | ||
| 115 | |||
| 116 | /* Get/set. */ | ||
| 117 | JEMALLOC_ALWAYS_INLINE tsd_t * | ||
| 118 | tsd_get(bool init) { | ||
| 119 | tsd_wrapper_t *wrapper; | ||
| 120 | |||
| 121 | assert(tsd_booted); | ||
| 122 | wrapper = tsd_wrapper_get(init); | ||
| 123 | if (tsd_get_allocates() && !init && wrapper == NULL) { | ||
| 124 | return NULL; | ||
| 125 | } | ||
| 126 | return &wrapper->val; | ||
| 127 | } | ||
| 128 | |||
| 129 | JEMALLOC_ALWAYS_INLINE void | ||
| 130 | tsd_set(tsd_t *val) { | ||
| 131 | tsd_wrapper_t *wrapper; | ||
| 132 | |||
| 133 | assert(tsd_booted); | ||
| 134 | wrapper = tsd_wrapper_get(true); | ||
| 135 | if (likely(&wrapper->val != val)) { | ||
| 136 | wrapper->val = *(val); | ||
| 137 | } | ||
| 138 | wrapper->initialized = true; | ||
| 139 | } | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/typed_list.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/typed_list.h deleted file mode 100644 index 6535055..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/typed_list.h +++ /dev/null | |||
| @@ -1,55 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_TYPED_LIST_H | ||
| 2 | #define JEMALLOC_INTERNAL_TYPED_LIST_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * This wraps the ql module to implement a list class in a way that's a little | ||
| 6 | * bit easier to use; it handles ql_elm_new calls and provides type safety. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #define TYPED_LIST(list_type, el_type, linkage) \ | ||
| 10 | typedef struct { \ | ||
| 11 | ql_head(el_type) head; \ | ||
| 12 | } list_type##_t; \ | ||
| 13 | static inline void \ | ||
| 14 | list_type##_init(list_type##_t *list) { \ | ||
| 15 | ql_new(&list->head); \ | ||
| 16 | } \ | ||
| 17 | static inline el_type * \ | ||
| 18 | list_type##_first(const list_type##_t *list) { \ | ||
| 19 | return ql_first(&list->head); \ | ||
| 20 | } \ | ||
| 21 | static inline el_type * \ | ||
| 22 | list_type##_last(const list_type##_t *list) { \ | ||
| 23 | return ql_last(&list->head, linkage); \ | ||
| 24 | } \ | ||
| 25 | static inline void \ | ||
| 26 | list_type##_append(list_type##_t *list, el_type *item) { \ | ||
| 27 | ql_elm_new(item, linkage); \ | ||
| 28 | ql_tail_insert(&list->head, item, linkage); \ | ||
| 29 | } \ | ||
| 30 | static inline void \ | ||
| 31 | list_type##_prepend(list_type##_t *list, el_type *item) { \ | ||
| 32 | ql_elm_new(item, linkage); \ | ||
| 33 | ql_head_insert(&list->head, item, linkage); \ | ||
| 34 | } \ | ||
| 35 | static inline void \ | ||
| 36 | list_type##_replace(list_type##_t *list, el_type *to_remove, \ | ||
| 37 | el_type *to_insert) { \ | ||
| 38 | ql_elm_new(to_insert, linkage); \ | ||
| 39 | ql_after_insert(to_remove, to_insert, linkage); \ | ||
| 40 | ql_remove(&list->head, to_remove, linkage); \ | ||
| 41 | } \ | ||
| 42 | static inline void \ | ||
| 43 | list_type##_remove(list_type##_t *list, el_type *item) { \ | ||
| 44 | ql_remove(&list->head, item, linkage); \ | ||
| 45 | } \ | ||
| 46 | static inline bool \ | ||
| 47 | list_type##_empty(list_type##_t *list) { \ | ||
| 48 | return ql_empty(&list->head); \ | ||
| 49 | } \ | ||
| 50 | static inline void \ | ||
| 51 | list_type##_concat(list_type##_t *list_a, list_type##_t *list_b) { \ | ||
| 52 | ql_concat(&list_a->head, &list_b->head, linkage); \ | ||
| 53 | } | ||
| 54 | |||
| 55 | #endif /* JEMALLOC_INTERNAL_TYPED_LIST_H */ | ||
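`TYPED_LIST` stamps out a small type-safe list class per element type on top of the `ql` intrusive-list macros, so callers never touch `ql_elm_new` or linkage fields directly. Below is a standalone sketch of the same macro-generated-list idea, built on a tiny singly linked list instead of `ql` so it compiles on its own; `DEMO_TYPED_LIST`, `node_t`, and the other names are hypothetical.

```c
/* Macro-generated, type-safe intrusive list (simplified TYPED_LIST analogue). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Generates <list_type>_t plus init/append/first/empty for el_type, where
 * el_type has a member `el_type *linkage;` used as the intrusive link. */
#define DEMO_TYPED_LIST(list_type, el_type, linkage) \
typedef struct { el_type *head; el_type *tail; } list_type##_t; \
static inline void list_type##_init(list_type##_t *l) { \
	l->head = l->tail = NULL; \
} \
static inline bool list_type##_empty(const list_type##_t *l) { \
	return l->head == NULL; \
} \
static inline el_type *list_type##_first(const list_type##_t *l) { \
	return l->head; \
} \
static inline void list_type##_append(list_type##_t *l, el_type *item) { \
	item->linkage = NULL; \
	if (l->tail == NULL) { \
		l->head = item; \
	} else { \
		l->tail->linkage = item; \
	} \
	l->tail = item; \
}

typedef struct node_s node_t;
struct node_s {
	int value;
	node_t *next;	/* Intrusive link consumed by the generated list. */
};

DEMO_TYPED_LIST(node_list, node_t, next)

int
main(void) {
	node_list_t list;
	node_t a = { 1, NULL }, b = { 2, NULL };

	node_list_init(&list);
	node_list_append(&list, &a);
	node_list_append(&list, &b);
	for (node_t *n = node_list_first(&list); n != NULL; n = n->next) {
		printf("%d\n", n->value);
	}
	return 0;
}
```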
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/util.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/util.h deleted file mode 100644 index dcb1c0a..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/util.h +++ /dev/null | |||
| @@ -1,123 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_UTIL_H | ||
| 2 | #define JEMALLOC_INTERNAL_UTIL_H | ||
| 3 | |||
| 4 | #define UTIL_INLINE static inline | ||
| 5 | |||
| 6 | /* Junk fill patterns. */ | ||
| 7 | #ifndef JEMALLOC_ALLOC_JUNK | ||
| 8 | # define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) | ||
| 9 | #endif | ||
| 10 | #ifndef JEMALLOC_FREE_JUNK | ||
| 11 | # define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) | ||
| 12 | #endif | ||
| 13 | |||
| 14 | /* | ||
| 15 | * Wrap a cpp argument that contains commas such that it isn't broken up into | ||
| 16 | * multiple arguments. | ||
| 17 | */ | ||
| 18 | #define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ | ||
| 19 | |||
| 20 | /* cpp macro definition stringification. */ | ||
| 21 | #define STRINGIFY_HELPER(x) #x | ||
| 22 | #define STRINGIFY(x) STRINGIFY_HELPER(x) | ||
| 23 | |||
| 24 | /* | ||
| 25 | * Silence compiler warnings due to uninitialized values. This is used | ||
| 26 | * wherever the compiler fails to recognize that the variable is never used | ||
| 27 | * uninitialized. | ||
| 28 | */ | ||
| 29 | #define JEMALLOC_CC_SILENCE_INIT(v) = v | ||
| 30 | |||
| 31 | #ifdef __GNUC__ | ||
| 32 | # define likely(x) __builtin_expect(!!(x), 1) | ||
| 33 | # define unlikely(x) __builtin_expect(!!(x), 0) | ||
| 34 | #else | ||
| 35 | # define likely(x) !!(x) | ||
| 36 | # define unlikely(x) !!(x) | ||
| 37 | #endif | ||
| 38 | |||
| 39 | #if !defined(JEMALLOC_INTERNAL_UNREACHABLE) | ||
| 40 | # error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure | ||
| 41 | #endif | ||
| 42 | |||
| 43 | #define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() | ||
| 44 | |||
| 45 | /* Set error code. */ | ||
| 46 | UTIL_INLINE void | ||
| 47 | set_errno(int errnum) { | ||
| 48 | #ifdef _WIN32 | ||
| 49 | SetLastError(errnum); | ||
| 50 | #else | ||
| 51 | errno = errnum; | ||
| 52 | #endif | ||
| 53 | } | ||
| 54 | |||
| 55 | /* Get last error code. */ | ||
| 56 | UTIL_INLINE int | ||
| 57 | get_errno(void) { | ||
| 58 | #ifdef _WIN32 | ||
| 59 | return GetLastError(); | ||
| 60 | #else | ||
| 61 | return errno; | ||
| 62 | #endif | ||
| 63 | } | ||
| 64 | |||
| 65 | JEMALLOC_ALWAYS_INLINE void | ||
| 66 | util_assume(bool b) { | ||
| 67 | if (!b) { | ||
| 68 | unreachable(); | ||
| 69 | } | ||
| 70 | } | ||
| 71 | |||
| 72 | /* ptr should be valid. */ | ||
| 73 | JEMALLOC_ALWAYS_INLINE void | ||
| 74 | util_prefetch_read(void *ptr) { | ||
| 75 | /* | ||
| 76 | * This should arguably be a config check, but any version of GCC so old | ||
| 77 | * that it doesn't support __builtin_prefetch is also too old to build | ||
| 78 | * jemalloc. | ||
| 79 | */ | ||
| 80 | #ifdef __GNUC__ | ||
| 81 | if (config_debug) { | ||
| 82 | /* Enforce the "valid ptr" requirement. */ | ||
| 83 | *(volatile char *)ptr; | ||
| 84 | } | ||
| 85 | __builtin_prefetch(ptr, /* read or write */ 0, /* locality hint */ 3); | ||
| 86 | #else | ||
| 87 | *(volatile char *)ptr; | ||
| 88 | #endif | ||
| 89 | } | ||
| 90 | |||
| 91 | JEMALLOC_ALWAYS_INLINE void | ||
| 92 | util_prefetch_write(void *ptr) { | ||
| 93 | #ifdef __GNUC__ | ||
| 94 | if (config_debug) { | ||
| 95 | *(volatile char *)ptr; | ||
| 96 | } | ||
| 97 | /* | ||
| 98 | * The only difference from the read variant is that this has a 1 as the | ||
| 99 | * second argument (the write hint). | ||
| 100 | */ | ||
| 101 | __builtin_prefetch(ptr, 1, 3); | ||
| 102 | #else | ||
| 103 | *(volatile char *)ptr; | ||
| 104 | #endif | ||
| 105 | } | ||
| 106 | |||
| 107 | JEMALLOC_ALWAYS_INLINE void | ||
| 108 | util_prefetch_read_range(void *ptr, size_t sz) { | ||
| 109 | for (size_t i = 0; i < sz; i += CACHELINE) { | ||
| 110 | util_prefetch_read((void *)((uintptr_t)ptr + i)); | ||
| 111 | } | ||
| 112 | } | ||
| 113 | |||
| 114 | JEMALLOC_ALWAYS_INLINE void | ||
| 115 | util_prefetch_write_range(void *ptr, size_t sz) { | ||
| 116 | for (size_t i = 0; i < sz; i += CACHELINE) { | ||
| 117 | util_prefetch_write((void *)((uintptr_t)ptr + i)); | ||
| 118 | } | ||
| 119 | } | ||
| 120 | |||
| 121 | #undef UTIL_INLINE | ||
| 122 | |||
| 123 | #endif /* JEMALLOC_INTERNAL_UTIL_H */ | ||
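The `likely`/`unlikely` and `util_prefetch_*` helpers only shape code generation on hot paths; behavior is identical if the hints are ignored. Below is a standalone sketch of typical usage, assuming GCC/Clang builtins; everything outside the two macro definitions (the `sum_with_prefetch` loop and its parameters) is hypothetical.

```c
/* Branch hints for cold error paths, software prefetch ahead of a hot loop. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef __GNUC__
#  define likely(x)   __builtin_expect(!!(x), 1)
#  define unlikely(x) __builtin_expect(!!(x), 0)
#else
#  define likely(x)   (!!(x))
#  define unlikely(x) (!!(x))
#endif

static uint64_t
sum_with_prefetch(const uint64_t *a, size_t n) {
	uint64_t sum = 0;
	for (size_t i = 0; i < n; i++) {
#ifdef __GNUC__
		if (likely(i + 8 < n)) {
			/* Read prefetch, high temporal locality (0, 3). */
			__builtin_prefetch(&a[i + 8], 0, 3);
		}
#endif
		sum += a[i];
	}
	return sum;
}

int
main(void) {
	size_t n = 1 << 16;
	uint64_t *a = malloc(n * sizeof(*a));
	if (unlikely(a == NULL)) {	/* Error path marked as cold. */
		return 1;
	}
	for (size_t i = 0; i < n; i++) {
		a[i] = i;
	}
	printf("sum=%llu\n", (unsigned long long)sum_with_prefetch(a, n));
	free(a);
	return 0;
}
```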
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/witness.h b/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/witness.h deleted file mode 100644 index e81b9a0..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/internal/witness.h +++ /dev/null | |||
| @@ -1,378 +0,0 @@ | |||
| 1 | #ifndef JEMALLOC_INTERNAL_WITNESS_H | ||
| 2 | #define JEMALLOC_INTERNAL_WITNESS_H | ||
| 3 | |||
| 4 | #include "jemalloc/internal/ql.h" | ||
| 5 | |||
| 6 | /******************************************************************************/ | ||
| 7 | /* LOCK RANKS */ | ||
| 8 | /******************************************************************************/ | ||
| 9 | |||
| 10 | enum witness_rank_e { | ||
| 11 | /* | ||
| 12 | * Order matters within this enum listing -- higher valued locks can | ||
| 13 | * only be acquired after lower-valued ones. We use the | ||
| 14 | * auto-incrementing-ness of enum values to enforce this. | ||
| 15 | */ | ||
| 16 | |||
| 17 | /* | ||
| 18 | * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the | ||
| 19 | * witness machinery. | ||
| 20 | */ | ||
| 21 | WITNESS_RANK_OMIT, | ||
| 22 | WITNESS_RANK_MIN, | ||
| 23 | WITNESS_RANK_INIT = WITNESS_RANK_MIN, | ||
| 24 | WITNESS_RANK_CTL, | ||
| 25 | WITNESS_RANK_TCACHES, | ||
| 26 | WITNESS_RANK_ARENAS, | ||
| 27 | WITNESS_RANK_BACKGROUND_THREAD_GLOBAL, | ||
| 28 | WITNESS_RANK_PROF_DUMP, | ||
| 29 | WITNESS_RANK_PROF_BT2GCTX, | ||
| 30 | WITNESS_RANK_PROF_TDATAS, | ||
| 31 | WITNESS_RANK_PROF_TDATA, | ||
| 32 | WITNESS_RANK_PROF_LOG, | ||
| 33 | WITNESS_RANK_PROF_GCTX, | ||
| 34 | WITNESS_RANK_PROF_RECENT_DUMP, | ||
| 35 | WITNESS_RANK_BACKGROUND_THREAD, | ||
| 36 | /* | ||
| 37 | * Used as an argument to witness_assert_depth_to_rank() in order to | ||
| 38 | * validate depth excluding non-core locks with lower ranks. Since the | ||
| 39 | * rank argument to witness_assert_depth_to_rank() is inclusive rather | ||
| 40 | * than exclusive, this definition can have the same value as the | ||
| 41 | * minimally ranked core lock. | ||
| 42 | */ | ||
| 43 | WITNESS_RANK_CORE, | ||
| 44 | WITNESS_RANK_DECAY = WITNESS_RANK_CORE, | ||
| 45 | WITNESS_RANK_TCACHE_QL, | ||
| 46 | |||
| 47 | WITNESS_RANK_SEC_SHARD, | ||
| 48 | |||
| 49 | WITNESS_RANK_EXTENT_GROW, | ||
| 50 | WITNESS_RANK_HPA_SHARD_GROW = WITNESS_RANK_EXTENT_GROW, | ||
| 51 | WITNESS_RANK_SAN_BUMP_ALLOC = WITNESS_RANK_EXTENT_GROW, | ||
| 52 | |||
| 53 | WITNESS_RANK_EXTENTS, | ||
| 54 | WITNESS_RANK_HPA_SHARD = WITNESS_RANK_EXTENTS, | ||
| 55 | |||
| 56 | WITNESS_RANK_HPA_CENTRAL_GROW, | ||
| 57 | WITNESS_RANK_HPA_CENTRAL, | ||
| 58 | |||
| 59 | WITNESS_RANK_EDATA_CACHE, | ||
| 60 | |||
| 61 | WITNESS_RANK_RTREE, | ||
| 62 | WITNESS_RANK_BASE, | ||
| 63 | WITNESS_RANK_ARENA_LARGE, | ||
| 64 | WITNESS_RANK_HOOK, | ||
| 65 | |||
| 66 | WITNESS_RANK_LEAF=0x1000, | ||
| 67 | WITNESS_RANK_BIN = WITNESS_RANK_LEAF, | ||
| 68 | WITNESS_RANK_ARENA_STATS = WITNESS_RANK_LEAF, | ||
| 69 | WITNESS_RANK_COUNTER_ACCUM = WITNESS_RANK_LEAF, | ||
| 70 | WITNESS_RANK_DSS = WITNESS_RANK_LEAF, | ||
| 71 | WITNESS_RANK_PROF_ACTIVE = WITNESS_RANK_LEAF, | ||
| 72 | WITNESS_RANK_PROF_DUMP_FILENAME = WITNESS_RANK_LEAF, | ||
| 73 | WITNESS_RANK_PROF_GDUMP = WITNESS_RANK_LEAF, | ||
| 74 | WITNESS_RANK_PROF_NEXT_THR_UID = WITNESS_RANK_LEAF, | ||
| 75 | WITNESS_RANK_PROF_RECENT_ALLOC = WITNESS_RANK_LEAF, | ||
| 76 | WITNESS_RANK_PROF_STATS = WITNESS_RANK_LEAF, | ||
| 77 | WITNESS_RANK_PROF_THREAD_ACTIVE_INIT = WITNESS_RANK_LEAF, | ||
| 78 | }; | ||
| 79 | typedef enum witness_rank_e witness_rank_t; | ||
| 80 | |||
| 81 | /******************************************************************************/ | ||
| 82 | /* PER-WITNESS DATA */ | ||
| 83 | /******************************************************************************/ | ||
| 84 | #if defined(JEMALLOC_DEBUG) | ||
| 85 | # define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}} | ||
| 86 | #else | ||
| 87 | # define WITNESS_INITIALIZER(name, rank) | ||
| 88 | #endif | ||
| 89 | |||
| 90 | typedef struct witness_s witness_t; | ||
| 91 | typedef ql_head(witness_t) witness_list_t; | ||
| 92 | typedef int witness_comp_t (const witness_t *, void *, const witness_t *, | ||
| 93 | void *); | ||
| 94 | |||
| 95 | struct witness_s { | ||
| 96 | /* Name, used for printing lock order reversal messages. */ | ||
| 97 | const char *name; | ||
| 98 | |||
| 99 | /* | ||
| 100 | * Witness rank, where 0 is lowest and WITNESS_RANK_LEAF is highest. | ||
| 101 | * Witnesses must be acquired in order of increasing rank. | ||
| 102 | */ | ||
| 103 | witness_rank_t rank; | ||
| 104 | |||
| 105 | /* | ||
| 106 | * If two witnesses are of equal rank and they have the same comp | ||
| 107 | * function pointer, it is called as a last attempt to differentiate | ||
| 108 | * between witnesses of equal rank. | ||
| 109 | */ | ||
| 110 | witness_comp_t *comp; | ||
| 111 | |||
| 112 | /* Opaque data, passed to comp(). */ | ||
| 113 | void *opaque; | ||
| 114 | |||
| 115 | /* Linkage for thread's currently owned locks. */ | ||
| 116 | ql_elm(witness_t) link; | ||
| 117 | }; | ||
| 118 | |||
| 119 | /******************************************************************************/ | ||
| 120 | /* PER-THREAD DATA */ | ||
| 121 | /******************************************************************************/ | ||
| 122 | typedef struct witness_tsd_s witness_tsd_t; | ||
| 123 | struct witness_tsd_s { | ||
| 124 | witness_list_t witnesses; | ||
| 125 | bool forking; | ||
| 126 | }; | ||
| 127 | |||
| 128 | #define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false } | ||
| 129 | #define WITNESS_TSDN_NULL ((witness_tsdn_t *)0) | ||
| 130 | |||
| 131 | /******************************************************************************/ | ||
| 132 | /* (PER-THREAD) NULLABILITY HELPERS */ | ||
| 133 | /******************************************************************************/ | ||
| 134 | typedef struct witness_tsdn_s witness_tsdn_t; | ||
| 135 | struct witness_tsdn_s { | ||
| 136 | witness_tsd_t witness_tsd; | ||
| 137 | }; | ||
| 138 | |||
| 139 | JEMALLOC_ALWAYS_INLINE witness_tsdn_t * | ||
| 140 | witness_tsd_tsdn(witness_tsd_t *witness_tsd) { | ||
| 141 | return (witness_tsdn_t *)witness_tsd; | ||
| 142 | } | ||
| 143 | |||
| 144 | JEMALLOC_ALWAYS_INLINE bool | ||
| 145 | witness_tsdn_null(witness_tsdn_t *witness_tsdn) { | ||
| 146 | return witness_tsdn == NULL; | ||
| 147 | } | ||
| 148 | |||
| 149 | JEMALLOC_ALWAYS_INLINE witness_tsd_t * | ||
| 150 | witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) { | ||
| 151 | assert(!witness_tsdn_null(witness_tsdn)); | ||
| 152 | return &witness_tsdn->witness_tsd; | ||
| 153 | } | ||
| 154 | |||
| 155 | /******************************************************************************/ | ||
| 156 | /* API */ | ||
| 157 | /******************************************************************************/ | ||
| 158 | void witness_init(witness_t *witness, const char *name, witness_rank_t rank, | ||
| 159 | witness_comp_t *comp, void *opaque); | ||
| 160 | |||
| 161 | typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *); | ||
| 162 | extern witness_lock_error_t *JET_MUTABLE witness_lock_error; | ||
| 163 | |||
| 164 | typedef void (witness_owner_error_t)(const witness_t *); | ||
| 165 | extern witness_owner_error_t *JET_MUTABLE witness_owner_error; | ||
| 166 | |||
| 167 | typedef void (witness_not_owner_error_t)(const witness_t *); | ||
| 168 | extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error; | ||
| 169 | |||
| 170 | typedef void (witness_depth_error_t)(const witness_list_t *, | ||
| 171 | witness_rank_t rank_inclusive, unsigned depth); | ||
| 172 | extern witness_depth_error_t *JET_MUTABLE witness_depth_error; | ||
| 173 | |||
| 174 | void witnesses_cleanup(witness_tsd_t *witness_tsd); | ||
| 175 | void witness_prefork(witness_tsd_t *witness_tsd); | ||
| 176 | void witness_postfork_parent(witness_tsd_t *witness_tsd); | ||
| 177 | void witness_postfork_child(witness_tsd_t *witness_tsd); | ||
| 178 | |||
| 179 | /* Helper, not intended for direct use. */ | ||
| 180 | static inline bool | ||
| 181 | witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) { | ||
| 182 | witness_list_t *witnesses; | ||
| 183 | witness_t *w; | ||
| 184 | |||
| 185 | cassert(config_debug); | ||
| 186 | |||
| 187 | witnesses = &witness_tsd->witnesses; | ||
| 188 | ql_foreach(w, witnesses, link) { | ||
| 189 | if (w == witness) { | ||
| 190 | return true; | ||
| 191 | } | ||
| 192 | } | ||
| 193 | |||
| 194 | return false; | ||
| 195 | } | ||
| 196 | |||
| 197 | static inline void | ||
| 198 | witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) { | ||
| 199 | witness_tsd_t *witness_tsd; | ||
| 200 | |||
| 201 | if (!config_debug) { | ||
| 202 | return; | ||
| 203 | } | ||
| 204 | |||
| 205 | if (witness_tsdn_null(witness_tsdn)) { | ||
| 206 | return; | ||
| 207 | } | ||
| 208 | witness_tsd = witness_tsdn_tsd(witness_tsdn); | ||
| 209 | if (witness->rank == WITNESS_RANK_OMIT) { | ||
| 210 | return; | ||
| 211 | } | ||
| 212 | |||
| 213 | if (witness_owner(witness_tsd, witness)) { | ||
| 214 | return; | ||
| 215 | } | ||
| 216 | witness_owner_error(witness); | ||
| 217 | } | ||
| 218 | |||
| 219 | static inline void | ||
| 220 | witness_assert_not_owner(witness_tsdn_t *witness_tsdn, | ||
| 221 | const witness_t *witness) { | ||
| 222 | witness_tsd_t *witness_tsd; | ||
| 223 | witness_list_t *witnesses; | ||
| 224 | witness_t *w; | ||
| 225 | |||
| 226 | if (!config_debug) { | ||
| 227 | return; | ||
| 228 | } | ||
| 229 | |||
| 230 | if (witness_tsdn_null(witness_tsdn)) { | ||
| 231 | return; | ||
| 232 | } | ||
| 233 | witness_tsd = witness_tsdn_tsd(witness_tsdn); | ||
| 234 | if (witness->rank == WITNESS_RANK_OMIT) { | ||
| 235 | return; | ||
| 236 | } | ||
| 237 | |||
| 238 | witnesses = &witness_tsd->witnesses; | ||
| 239 | ql_foreach(w, witnesses, link) { | ||
| 240 | if (w == witness) { | ||
| 241 | witness_not_owner_error(witness); | ||
| 242 | } | ||
| 243 | } | ||
| 244 | } | ||
| 245 | |||
| 246 | /* Returns depth. Not intended for direct use. */ | ||
| 247 | static inline unsigned | ||
| 248 | witness_depth_to_rank(witness_list_t *witnesses, witness_rank_t rank_inclusive) | ||
| 249 | { | ||
| 250 | unsigned d = 0; | ||
| 251 | witness_t *w = ql_last(witnesses, link); | ||
| 252 | |||
| 253 | if (w != NULL) { | ||
| 254 | ql_reverse_foreach(w, witnesses, link) { | ||
| 255 | if (w->rank < rank_inclusive) { | ||
| 256 | break; | ||
| 257 | } | ||
| 258 | d++; | ||
| 259 | } | ||
| 260 | } | ||
| 261 | |||
| 262 | return d; | ||
| 263 | } | ||
| 264 | |||
| 265 | static inline void | ||
| 266 | witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn, | ||
| 267 | witness_rank_t rank_inclusive, unsigned depth) { | ||
| 268 | if (!config_debug || witness_tsdn_null(witness_tsdn)) { | ||
| 269 | return; | ||
| 270 | } | ||
| 271 | |||
| 272 | witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses; | ||
| 273 | unsigned d = witness_depth_to_rank(witnesses, rank_inclusive); | ||
| 274 | |||
| 275 | if (d != depth) { | ||
| 276 | witness_depth_error(witnesses, rank_inclusive, depth); | ||
| 277 | } | ||
| 278 | } | ||
| 279 | |||
| 280 | static inline void | ||
| 281 | witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) { | ||
| 282 | witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth); | ||
| 283 | } | ||
| 284 | |||
| 285 | static inline void | ||
| 286 | witness_assert_lockless(witness_tsdn_t *witness_tsdn) { | ||
| 287 | witness_assert_depth(witness_tsdn, 0); | ||
| 288 | } | ||
| 289 | |||
| 290 | static inline void | ||
| 291 | witness_assert_positive_depth_to_rank(witness_tsdn_t *witness_tsdn, | ||
| 292 | witness_rank_t rank_inclusive) { | ||
| 293 | if (!config_debug || witness_tsdn_null(witness_tsdn)) { | ||
| 294 | return; | ||
| 295 | } | ||
| 296 | |||
| 297 | witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses; | ||
| 298 | unsigned d = witness_depth_to_rank(witnesses, rank_inclusive); | ||
| 299 | |||
| 300 | if (d == 0) { | ||
| 301 | witness_depth_error(witnesses, rank_inclusive, 1); | ||
| 302 | } | ||
| 303 | } | ||
| 304 | |||
| 305 | static inline void | ||
| 306 | witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) { | ||
| 307 | witness_tsd_t *witness_tsd; | ||
| 308 | witness_list_t *witnesses; | ||
| 309 | witness_t *w; | ||
| 310 | |||
| 311 | if (!config_debug) { | ||
| 312 | return; | ||
| 313 | } | ||
| 314 | |||
| 315 | if (witness_tsdn_null(witness_tsdn)) { | ||
| 316 | return; | ||
| 317 | } | ||
| 318 | witness_tsd = witness_tsdn_tsd(witness_tsdn); | ||
| 319 | if (witness->rank == WITNESS_RANK_OMIT) { | ||
| 320 | return; | ||
| 321 | } | ||
| 322 | |||
| 323 | witness_assert_not_owner(witness_tsdn, witness); | ||
| 324 | |||
| 325 | witnesses = &witness_tsd->witnesses; | ||
| 326 | w = ql_last(witnesses, link); | ||
| 327 | if (w == NULL) { | ||
| 328 | /* No other locks; do nothing. */ | ||
| 329 | } else if (witness_tsd->forking && w->rank <= witness->rank) { | ||
| 330 | /* Forking, and relaxed ranking satisfied. */ | ||
| 331 | } else if (w->rank > witness->rank) { | ||
| 332 | /* Not forking, rank order reversal. */ | ||
| 333 | witness_lock_error(witnesses, witness); | ||
| 334 | } else if (w->rank == witness->rank && (w->comp == NULL || w->comp != | ||
| 335 | witness->comp || w->comp(w, w->opaque, witness, witness->opaque) > | ||
| 336 | 0)) { | ||
| 337 | /* | ||
| 338 | * Missing/incompatible comparison function, or comparison | ||
| 339 | * function indicates rank order reversal. | ||
| 340 | */ | ||
| 341 | witness_lock_error(witnesses, witness); | ||
| 342 | } | ||
| 343 | |||
| 344 | ql_elm_new(witness, link); | ||
| 345 | ql_tail_insert(witnesses, witness, link); | ||
| 346 | } | ||
| 347 | |||
| 348 | static inline void | ||
| 349 | witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) { | ||
| 350 | witness_tsd_t *witness_tsd; | ||
| 351 | witness_list_t *witnesses; | ||
| 352 | |||
| 353 | if (!config_debug) { | ||
| 354 | return; | ||
| 355 | } | ||
| 356 | |||
| 357 | if (witness_tsdn_null(witness_tsdn)) { | ||
| 358 | return; | ||
| 359 | } | ||
| 360 | witness_tsd = witness_tsdn_tsd(witness_tsdn); | ||
| 361 | if (witness->rank == WITNESS_RANK_OMIT) { | ||
| 362 | return; | ||
| 363 | } | ||
| 364 | |||
| 365 | /* | ||
| 366 | * Check ownership before removal, rather than relying on | ||
| 367 | * witness_assert_owner() to abort, so that unit tests can test this | ||
| 368 | * function's failure mode without causing undefined behavior. | ||
| 369 | */ | ||
| 370 | if (witness_owner(witness_tsd, witness)) { | ||
| 371 | witnesses = &witness_tsd->witnesses; | ||
| 372 | ql_remove(witnesses, witness, link); | ||
| 373 | } else { | ||
| 374 | witness_assert_owner(witness_tsdn, witness); | ||
| 375 | } | ||
| 376 | } | ||
| 377 | |||
| 378 | #endif /* JEMALLOC_INTERNAL_WITNESS_H */ | ||
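The witness code above is jemalloc's debug-build lock-order checker: every lock carries a rank, each thread keeps a list of held witnesses, and acquiring a lock whose rank does not follow the most recently acquired one (modulo the forking relaxation and per-rank comparison functions) is reported as a potential deadlock. Below is a minimal standalone sketch of the same idea; all names are hypothetical and none of jemalloc's tsd, forking, or comparator machinery is reproduced.

```c
/* Standalone sketch of rank-ordered lock checking; this is not jemalloc's
 * witness API. Release is simplified to strict LIFO, unlike witness_unlock()
 * above, and the "held" stack is global rather than per-thread. */
#include <assert.h>
#include <stdio.h>

#define MAX_HELD 16

typedef struct {
    const char *name;
    unsigned rank;               /* lower ranks must be acquired first */
} lock_witness_t;

static lock_witness_t *held[MAX_HELD];
static unsigned n_held = 0;

static void
witness_like_lock(lock_witness_t *w) {
    if (n_held > 0 && held[n_held - 1]->rank > w->rank) {
        /* Rank order reversal: the case witness_lock_error() reports. */
        fprintf(stderr, "lock order reversal: %s (rank %u) after %s (rank %u)\n",
            w->name, w->rank, held[n_held - 1]->name, held[n_held - 1]->rank);
    }
    assert(n_held < MAX_HELD);
    held[n_held++] = w;
}

static void
witness_like_unlock(lock_witness_t *w) {
    assert(n_held > 0 && held[n_held - 1] == w);
    n_held--;
}

int
main(void) {
    lock_witness_t arenas = {"arenas", 1}, bin = {"bin", 2};

    witness_like_lock(&arenas);  /* rank 1 first: fine */
    witness_like_lock(&bin);     /* rank 2 after rank 1: fine */
    witness_like_unlock(&bin);
    witness_like_unlock(&arenas);

    witness_like_lock(&bin);
    witness_like_lock(&arenas);  /* rank 1 after rank 2: reported */
    witness_like_unlock(&arenas);
    witness_like_unlock(&bin);
    return 0;
}
```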
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc.sh b/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc.sh deleted file mode 100755 index b19b154..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc.sh +++ /dev/null | |||
| @@ -1,27 +0,0 @@ | |||
| 1 | #!/bin/sh | ||
| 2 | |||
| 3 | objroot=$1 | ||
| 4 | |||
| 5 | cat <<EOF | ||
| 6 | #ifndef JEMALLOC_H_ | ||
| 7 | #define JEMALLOC_H_ | ||
| 8 | #ifdef __cplusplus | ||
| 9 | extern "C" { | ||
| 10 | #endif | ||
| 11 | |||
| 12 | EOF | ||
| 13 | |||
| 14 | for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \ | ||
| 15 | jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do | ||
| 16 | cat "${objroot}include/jemalloc/${hdr}" \ | ||
| 17 | | grep -v 'Generated from .* by configure\.' \ | ||
| 18 | | sed -e 's/ $//g' | ||
| 19 | echo | ||
| 20 | done | ||
| 21 | |||
| 22 | cat <<EOF | ||
| 23 | #ifdef __cplusplus | ||
| 24 | } | ||
| 25 | #endif | ||
| 26 | #endif /* JEMALLOC_H_ */ | ||
| 27 | EOF | ||
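jemalloc.sh stitches the configure-generated per-component headers into the single public jemalloc.h, wrapping them in an include guard and an extern "C" block. The assembled header has roughly this shape (illustrative, not verbatim):

```c
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif

/* ... jemalloc_defs.h ... */
/* ... jemalloc_rename.h ... */
/* ... jemalloc_macros.h ... */
/* ... jemalloc_protos.h ... */
/* ... jemalloc_typedefs.h ... */
/* ... jemalloc_mangle.h ... */

#ifdef __cplusplus
}
#endif
#endif /* JEMALLOC_H_ */
```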
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in b/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in deleted file mode 100644 index cbe2fca..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in +++ /dev/null | |||
| @@ -1,54 +0,0 @@ | |||
| 1 | /* Defined if __attribute__((...)) syntax is supported. */ | ||
| 2 | #undef JEMALLOC_HAVE_ATTR | ||
| 3 | |||
| 4 | /* Defined if alloc_size attribute is supported. */ | ||
| 5 | #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE | ||
| 6 | |||
| 7 | /* Defined if format_arg(...) attribute is supported. */ | ||
| 8 | #undef JEMALLOC_HAVE_ATTR_FORMAT_ARG | ||
| 9 | |||
| 10 | /* Defined if format(gnu_printf, ...) attribute is supported. */ | ||
| 11 | #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF | ||
| 12 | |||
| 13 | /* Defined if format(printf, ...) attribute is supported. */ | ||
| 14 | #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF | ||
| 15 | |||
| 16 | /* Defined if fallthrough attribute is supported. */ | ||
| 17 | #undef JEMALLOC_HAVE_ATTR_FALLTHROUGH | ||
| 18 | |||
| 19 | /* Defined if cold attribute is supported. */ | ||
| 20 | #undef JEMALLOC_HAVE_ATTR_COLD | ||
| 21 | |||
| 22 | /* | ||
| 23 | * Define overrides for non-standard allocator-related functions if they are | ||
| 24 | * present on the system. | ||
| 25 | */ | ||
| 26 | #undef JEMALLOC_OVERRIDE_MEMALIGN | ||
| 27 | #undef JEMALLOC_OVERRIDE_VALLOC | ||
| 28 | |||
| 29 | /* | ||
| 30 | * At least Linux omits the "const" in: | ||
| 31 | * | ||
| 32 | * size_t malloc_usable_size(const void *ptr); | ||
| 33 | * | ||
| 34 | * Match the operating system's prototype. | ||
| 35 | */ | ||
| 36 | #undef JEMALLOC_USABLE_SIZE_CONST | ||
| 37 | |||
| 38 | /* | ||
| 39 | * If defined, specify throw() for the public function prototypes when compiling | ||
| 40 | * with C++. The only justification for this is to match the prototypes that | ||
| 41 | * glibc defines. | ||
| 42 | */ | ||
| 43 | #undef JEMALLOC_USE_CXX_THROW | ||
| 44 | |||
| 45 | #ifdef _MSC_VER | ||
| 46 | # ifdef _WIN64 | ||
| 47 | # define LG_SIZEOF_PTR_WIN 3 | ||
| 48 | # else | ||
| 49 | # define LG_SIZEOF_PTR_WIN 2 | ||
| 50 | # endif | ||
| 51 | #endif | ||
| 52 | |||
| 53 | /* sizeof(void *) == 2^LG_SIZEOF_PTR. */ | ||
| 54 | #undef LG_SIZEOF_PTR | ||
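A small sketch of the relationship documented on the last line: once configure substitutes LG_SIZEOF_PTR (or, under MSVC, LG_SIZEOF_PTR_WIN is derived from _WIN64), sizeof(void *) must equal 2^LG_SIZEOF_PTR. The value 3 below is an assumed stand-in for a 64-bit target, not something configure produced here.

```c
#include <stdio.h>

#define LG_SIZEOF_PTR 3   /* hypothetical configure result for a 64-bit target */

int
main(void) {
    printf("sizeof(void *) = %zu, 1 << LG_SIZEOF_PTR = %d\n",
        sizeof(void *), 1 << LG_SIZEOF_PTR);
    return (sizeof(void *) == ((size_t)1 << LG_SIZEOF_PTR)) ? 0 : 1;
}
```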
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in b/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in deleted file mode 100644 index 8d81a75..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in +++ /dev/null | |||
| @@ -1,156 +0,0 @@ | |||
| 1 | #include <stdlib.h> | ||
| 2 | #include <stdbool.h> | ||
| 3 | #include <stdint.h> | ||
| 4 | #include <limits.h> | ||
| 5 | #include <strings.h> | ||
| 6 | |||
| 7 | #define JEMALLOC_VERSION "@jemalloc_version@" | ||
| 8 | #define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ | ||
| 9 | #define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ | ||
| 10 | #define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@ | ||
| 11 | #define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ | ||
| 12 | #define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" | ||
| 13 | #define JEMALLOC_VERSION_GID_IDENT @jemalloc_version_gid@ | ||
| 14 | |||
| 15 | #define MALLOCX_LG_ALIGN(la) ((int)(la)) | ||
| 16 | #if LG_SIZEOF_PTR == 2 | ||
| 17 | # define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1)) | ||
| 18 | #else | ||
| 19 | # define MALLOCX_ALIGN(a) \ | ||
| 20 | ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \ | ||
| 21 | ffs((int)(((size_t)(a))>>32))+31)) | ||
| 22 | #endif | ||
| 23 | #define MALLOCX_ZERO ((int)0x40) | ||
| 24 | /* | ||
| 25 | * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 | ||
| 26 | * encodes MALLOCX_TCACHE_NONE. | ||
| 27 | */ | ||
| 28 | #define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) | ||
| 29 | #define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) | ||
| 30 | /* | ||
| 31 | * Bias arena index bits so that 0 encodes "use an automatically chosen arena". | ||
| 32 | */ | ||
| 33 | #define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) | ||
| 34 | |||
| 35 | /* | ||
| 36 | * Use as arena index in "arena.<i>.{purge,decay,dss}" and | ||
| 37 | * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This | ||
| 38 | * definition is intentionally specified in raw decimal format to support | ||
| 39 | * cpp-based string concatenation, e.g. | ||
| 40 | * | ||
| 41 | * #define STRINGIFY_HELPER(x) #x | ||
| 42 | * #define STRINGIFY(x) STRINGIFY_HELPER(x) | ||
| 43 | * | ||
| 44 | * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL, | ||
| 45 | * 0); | ||
| 46 | */ | ||
| 47 | #define MALLCTL_ARENAS_ALL 4096 | ||
| 48 | /* | ||
| 49 | * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select | ||
| 50 | * destroyed arenas. | ||
| 51 | */ | ||
| 52 | #define MALLCTL_ARENAS_DESTROYED 4097 | ||
| 53 | |||
| 54 | #if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) | ||
| 55 | # define JEMALLOC_CXX_THROW throw() | ||
| 56 | #else | ||
| 57 | # define JEMALLOC_CXX_THROW | ||
| 58 | #endif | ||
| 59 | |||
| 60 | #if defined(_MSC_VER) | ||
| 61 | # define JEMALLOC_ATTR(s) | ||
| 62 | # define JEMALLOC_ALIGNED(s) __declspec(align(s)) | ||
| 63 | # define JEMALLOC_ALLOC_SIZE(s) | ||
| 64 | # define JEMALLOC_ALLOC_SIZE2(s1, s2) | ||
| 65 | # ifndef JEMALLOC_EXPORT | ||
| 66 | # ifdef DLLEXPORT | ||
| 67 | # define JEMALLOC_EXPORT __declspec(dllexport) | ||
| 68 | # else | ||
| 69 | # define JEMALLOC_EXPORT __declspec(dllimport) | ||
| 70 | # endif | ||
| 71 | # endif | ||
| 72 | # define JEMALLOC_FORMAT_ARG(i) | ||
| 73 | # define JEMALLOC_FORMAT_PRINTF(s, i) | ||
| 74 | # define JEMALLOC_FALLTHROUGH | ||
| 75 | # define JEMALLOC_NOINLINE __declspec(noinline) | ||
| 76 | # ifdef __cplusplus | ||
| 77 | # define JEMALLOC_NOTHROW __declspec(nothrow) | ||
| 78 | # else | ||
| 79 | # define JEMALLOC_NOTHROW | ||
| 80 | # endif | ||
| 81 | # define JEMALLOC_SECTION(s) __declspec(allocate(s)) | ||
| 82 | # define JEMALLOC_RESTRICT_RETURN __declspec(restrict) | ||
| 83 | # if _MSC_VER >= 1900 && !defined(__EDG__) | ||
| 84 | # define JEMALLOC_ALLOCATOR __declspec(allocator) | ||
| 85 | # else | ||
| 86 | # define JEMALLOC_ALLOCATOR | ||
| 87 | # endif | ||
| 88 | # define JEMALLOC_COLD | ||
| 89 | #elif defined(JEMALLOC_HAVE_ATTR) | ||
| 90 | # define JEMALLOC_ATTR(s) __attribute__((s)) | ||
| 91 | # define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) | ||
| 92 | # ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE | ||
| 93 | # define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) | ||
| 94 | # define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) | ||
| 95 | # else | ||
| 96 | # define JEMALLOC_ALLOC_SIZE(s) | ||
| 97 | # define JEMALLOC_ALLOC_SIZE2(s1, s2) | ||
| 98 | # endif | ||
| 99 | # ifndef JEMALLOC_EXPORT | ||
| 100 | # define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) | ||
| 101 | # endif | ||
| 102 | # ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG | ||
| 103 | # define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3)) | ||
| 104 | # else | ||
| 105 | # define JEMALLOC_FORMAT_ARG(i) | ||
| 106 | # endif | ||
| 107 | # ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF | ||
| 108 | # define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) | ||
| 109 | # elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) | ||
| 110 | # define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) | ||
| 111 | # else | ||
| 112 | # define JEMALLOC_FORMAT_PRINTF(s, i) | ||
| 113 | # endif | ||
| 114 | # ifdef JEMALLOC_HAVE_ATTR_FALLTHROUGH | ||
| 115 | # define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough) | ||
| 116 | # else | ||
| 117 | # define JEMALLOC_FALLTHROUGH | ||
| 118 | # endif | ||
| 119 | # define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) | ||
| 120 | # define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) | ||
| 121 | # define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) | ||
| 122 | # define JEMALLOC_RESTRICT_RETURN | ||
| 123 | # define JEMALLOC_ALLOCATOR | ||
| 124 | # ifdef JEMALLOC_HAVE_ATTR_COLD | ||
| 125 | # define JEMALLOC_COLD JEMALLOC_ATTR(__cold__) | ||
| 126 | # else | ||
| 127 | # define JEMALLOC_COLD | ||
| 128 | # endif | ||
| 129 | #else | ||
| 130 | # define JEMALLOC_ATTR(s) | ||
| 131 | # define JEMALLOC_ALIGNED(s) | ||
| 132 | # define JEMALLOC_ALLOC_SIZE(s) | ||
| 133 | # define JEMALLOC_ALLOC_SIZE2(s1, s2) | ||
| 134 | # define JEMALLOC_EXPORT | ||
| 135 | # define JEMALLOC_FORMAT_PRINTF(s, i) | ||
| 136 | # define JEMALLOC_FALLTHROUGH | ||
| 137 | # define JEMALLOC_NOINLINE | ||
| 138 | # define JEMALLOC_NOTHROW | ||
| 139 | # define JEMALLOC_SECTION(s) | ||
| 140 | # define JEMALLOC_RESTRICT_RETURN | ||
| 141 | # define JEMALLOC_ALLOCATOR | ||
| 142 | # define JEMALLOC_COLD | ||
| 143 | #endif | ||
| 144 | |||
| 145 | #if (defined(__APPLE__) || defined(__FreeBSD__)) && !defined(JEMALLOC_NO_RENAME) | ||
| 146 | # define JEMALLOC_SYS_NOTHROW | ||
| 147 | #else | ||
| 148 | # define JEMALLOC_SYS_NOTHROW JEMALLOC_NOTHROW | ||
| 149 | #endif | ||
| 150 | |||
| 151 | /* This version of Jemalloc, modified for Redis, has the je_get_defrag_hint() | ||
| 152 | * function. */ | ||
| 153 | #define JEMALLOC_FRAG_HINT | ||
| 154 | |||
| 155 | /* This version of Jemalloc, modified for Redis, has the je_*_usable() family functions. */ | ||
| 156 | #define JEMALLOC_ALLOC_WITH_USIZE | ||
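The MALLOCX_* macros above are bit-field encodings OR-ed into the flags argument of the *allocx functions, and MALLCTL_ARENAS_ALL is a pseudo arena index. A hedged usage sketch, assuming a default build where the je_ prefix is stripped:

```c
#include <jemalloc/jemalloc.h>

#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)

int
main(void) {
    /* Flags are OR-ed bit encodings: 64-byte alignment plus zeroing. */
    void *p = mallocx(1024, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
    if (p == NULL) {
        return 1;
    }
    dallocx(p, 0);

    /* Purge dirty pages of every arena, using the cpp string-concatenation
     * trick described in the MALLCTL_ARENAS_ALL comment above. */
    return mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge",
        NULL, NULL, NULL, 0);
}
```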
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh b/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh deleted file mode 100755 index c675bb4..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh +++ /dev/null | |||
| @@ -1,45 +0,0 @@ | |||
| 1 | #!/bin/sh -eu | ||
| 2 | |||
| 3 | public_symbols_txt=$1 | ||
| 4 | symbol_prefix=$2 | ||
| 5 | |||
| 6 | cat <<EOF | ||
| 7 | /* | ||
| 8 | * By default application code must explicitly refer to mangled symbol names, | ||
| 9 | * so that it is possible to use jemalloc in conjunction with another allocator | ||
| 10 | * in the same application. Define JEMALLOC_MANGLE in order to cause automatic | ||
| 11 | * name mangling that matches the API prefixing that happened as a result of | ||
| 12 | * --with-mangling and/or --with-jemalloc-prefix configuration settings. | ||
| 13 | */ | ||
| 14 | #ifdef JEMALLOC_MANGLE | ||
| 15 | # ifndef JEMALLOC_NO_DEMANGLE | ||
| 16 | # define JEMALLOC_NO_DEMANGLE | ||
| 17 | # endif | ||
| 18 | EOF | ||
| 19 | |||
| 20 | for nm in `cat ${public_symbols_txt}` ; do | ||
| 21 | n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` | ||
| 22 | echo "# define ${n} ${symbol_prefix}${n}" | ||
| 23 | done | ||
| 24 | |||
| 25 | cat <<EOF | ||
| 26 | #endif | ||
| 27 | |||
| 28 | /* | ||
| 29 | * The ${symbol_prefix}* macros can be used as stable alternative names for the | ||
| 30 | * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily | ||
| 31 | * meant for use in jemalloc itself, but it can be used by application code to | ||
| 32 | * provide isolation from the name mangling specified via --with-mangling | ||
| 33 | * and/or --with-jemalloc-prefix. | ||
| 34 | */ | ||
| 35 | #ifndef JEMALLOC_NO_DEMANGLE | ||
| 36 | EOF | ||
| 37 | |||
| 38 | for nm in `cat ${public_symbols_txt}` ; do | ||
| 39 | n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` | ||
| 40 | echo "# undef ${symbol_prefix}${n}" | ||
| 41 | done | ||
| 42 | |||
| 43 | cat <<EOF | ||
| 44 | #endif | ||
| 45 | EOF | ||
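For each name listed in public_symbols.txt this script emits a #define in the JEMALLOC_MANGLE block and a matching #undef in the JEMALLOC_NO_DEMANGLE block. An illustrative slice of the generated jemalloc_mangle.h for a hypothetical entry `malloc:malloc` with symbol_prefix `je_` (comments elided):

```c
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
#  define JEMALLOC_NO_DEMANGLE
# endif
# define malloc je_malloc
#endif

#ifndef JEMALLOC_NO_DEMANGLE
# undef je_malloc
#endif
```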
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in b/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in deleted file mode 100644 index 356221c..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in +++ /dev/null | |||
| @@ -1,71 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * The @je_@ prefix on the following public symbol declarations is an artifact | ||
| 3 | * of namespace management, and should be omitted in application code unless | ||
| 4 | * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h). | ||
| 5 | */ | ||
| 6 | extern JEMALLOC_EXPORT const char *@je_@malloc_conf; | ||
| 7 | extern JEMALLOC_EXPORT void (*@je_@malloc_message)(void *cbopaque, | ||
| 8 | const char *s); | ||
| 9 | |||
| 10 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 11 | void JEMALLOC_SYS_NOTHROW *@je_@malloc(size_t size) | ||
| 12 | JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); | ||
| 13 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 14 | void JEMALLOC_SYS_NOTHROW *@je_@calloc(size_t num, size_t size) | ||
| 15 | JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); | ||
| 16 | JEMALLOC_EXPORT int JEMALLOC_SYS_NOTHROW @je_@posix_memalign( | ||
| 17 | void **memptr, size_t alignment, size_t size) JEMALLOC_CXX_THROW | ||
| 18 | JEMALLOC_ATTR(nonnull(1)); | ||
| 19 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 20 | void JEMALLOC_SYS_NOTHROW *@je_@aligned_alloc(size_t alignment, | ||
| 21 | size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) | ||
| 22 | JEMALLOC_ALLOC_SIZE(2); | ||
| 23 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 24 | void JEMALLOC_SYS_NOTHROW *@je_@realloc(void *ptr, size_t size) | ||
| 25 | JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); | ||
| 26 | JEMALLOC_EXPORT void JEMALLOC_SYS_NOTHROW @je_@free(void *ptr) | ||
| 27 | JEMALLOC_CXX_THROW; | ||
| 28 | |||
| 29 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 30 | void JEMALLOC_NOTHROW *@je_@mallocx(size_t size, int flags) | ||
| 31 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); | ||
| 32 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 33 | void JEMALLOC_NOTHROW *@je_@rallocx(void *ptr, size_t size, | ||
| 34 | int flags) JEMALLOC_ALLOC_SIZE(2); | ||
| 35 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@xallocx(void *ptr, size_t size, | ||
| 36 | size_t extra, int flags); | ||
| 37 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@sallocx(const void *ptr, | ||
| 38 | int flags) JEMALLOC_ATTR(pure); | ||
| 39 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@dallocx(void *ptr, int flags); | ||
| 40 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@sdallocx(void *ptr, size_t size, | ||
| 41 | int flags); | ||
| 42 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@nallocx(size_t size, int flags) | ||
| 43 | JEMALLOC_ATTR(pure); | ||
| 44 | |||
| 45 | JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctl(const char *name, | ||
| 46 | void *oldp, size_t *oldlenp, void *newp, size_t newlen); | ||
| 47 | JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctlnametomib(const char *name, | ||
| 48 | size_t *mibp, size_t *miblenp); | ||
| 49 | JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctlbymib(const size_t *mib, | ||
| 50 | size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); | ||
| 51 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@malloc_stats_print( | ||
| 52 | void (*write_cb)(void *, const char *), void *@je_@cbopaque, | ||
| 53 | const char *opts); | ||
| 54 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_usable_size( | ||
| 55 | JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; | ||
| 56 | #ifdef JEMALLOC_HAVE_MALLOC_SIZE | ||
| 57 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_size( | ||
| 58 | const void *ptr); | ||
| 59 | #endif | ||
| 60 | |||
| 61 | #ifdef JEMALLOC_OVERRIDE_MEMALIGN | ||
| 62 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 63 | void JEMALLOC_SYS_NOTHROW *@je_@memalign(size_t alignment, size_t size) | ||
| 64 | JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); | ||
| 65 | #endif | ||
| 66 | |||
| 67 | #ifdef JEMALLOC_OVERRIDE_VALLOC | ||
| 68 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 69 | void JEMALLOC_SYS_NOTHROW *@je_@valloc(size_t size) JEMALLOC_CXX_THROW | ||
| 70 | JEMALLOC_ATTR(malloc); | ||
| 71 | #endif | ||
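Beyond the standard malloc family, the prototypes above declare the *allocx extension API. A hedged usage sketch of the size-introspection calls (nallocx, sallocx, xallocx, sdallocx), again assuming the default build where the je_ prefix is stripped:

```c
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
    size_t req = 100;

    /* nallocx: usable size an allocation of `req` bytes would receive. */
    printf("nallocx(%zu) = %zu\n", req, nallocx(req, 0));

    void *p = mallocx(req, 0);
    if (p == NULL) {
        return 1;
    }
    /* sallocx: real usable size of an existing allocation. */
    printf("sallocx(p) = %zu\n", sallocx(p, 0));

    /* xallocx: try to resize in place; returns the resulting usable size,
     * which may be unchanged if the request could not be satisfied. */
    size_t usize = xallocx(p, req * 2, 0, 0);

    /* sdallocx: free with a size hint; the current usable size is a valid
     * hint, which lets jemalloc skip a size lookup. */
    sdallocx(p, usize, 0);
    return 0;
}
```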
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_rename.sh b/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_rename.sh deleted file mode 100755 index f943891..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_rename.sh +++ /dev/null | |||
| @@ -1,22 +0,0 @@ | |||
| 1 | #!/bin/sh | ||
| 2 | |||
| 3 | public_symbols_txt=$1 | ||
| 4 | |||
| 5 | cat <<EOF | ||
| 6 | /* | ||
| 7 | * Name mangling for public symbols is controlled by --with-mangling and | ||
| 8 | * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by | ||
| 9 | * these macro definitions. | ||
| 10 | */ | ||
| 11 | #ifndef JEMALLOC_NO_RENAME | ||
| 12 | EOF | ||
| 13 | |||
| 14 | for nm in `cat ${public_symbols_txt}` ; do | ||
| 15 | n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` | ||
| 16 | m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'` | ||
| 17 | echo "# define je_${n} ${m}" | ||
| 18 | done | ||
| 19 | |||
| 20 | cat <<EOF | ||
| 21 | #endif | ||
| 22 | EOF | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in b/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in deleted file mode 100644 index 1a58874..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in +++ /dev/null | |||
| @@ -1,77 +0,0 @@ | |||
| 1 | typedef struct extent_hooks_s extent_hooks_t; | ||
| 2 | |||
| 3 | /* | ||
| 4 | * void * | ||
| 5 | * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size, | ||
| 6 | * size_t alignment, bool *zero, bool *commit, unsigned arena_ind); | ||
| 7 | */ | ||
| 8 | typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *, | ||
| 9 | bool *, unsigned); | ||
| 10 | |||
| 11 | /* | ||
| 12 | * bool | ||
| 13 | * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size, | ||
| 14 | * bool committed, unsigned arena_ind); | ||
| 15 | */ | ||
| 16 | typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool, | ||
| 17 | unsigned); | ||
| 18 | |||
| 19 | /* | ||
| 20 | * void | ||
| 21 | * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size, | ||
| 22 | * bool committed, unsigned arena_ind); | ||
| 23 | */ | ||
| 24 | typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool, | ||
| 25 | unsigned); | ||
| 26 | |||
| 27 | /* | ||
| 28 | * bool | ||
| 29 | * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size, | ||
| 30 | * size_t offset, size_t length, unsigned arena_ind); | ||
| 31 | */ | ||
| 32 | typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t, | ||
| 33 | unsigned); | ||
| 34 | |||
| 35 | /* | ||
| 36 | * bool | ||
| 37 | * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size, | ||
| 38 | * size_t offset, size_t length, unsigned arena_ind); | ||
| 39 | */ | ||
| 40 | typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t, | ||
| 41 | size_t, unsigned); | ||
| 42 | |||
| 43 | /* | ||
| 44 | * bool | ||
| 45 | * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size, | ||
| 46 | * size_t offset, size_t length, unsigned arena_ind); | ||
| 47 | */ | ||
| 48 | typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t, | ||
| 49 | unsigned); | ||
| 50 | |||
| 51 | /* | ||
| 52 | * bool | ||
| 53 | * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size, | ||
| 54 | * size_t size_a, size_t size_b, bool committed, unsigned arena_ind); | ||
| 55 | */ | ||
| 56 | typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t, | ||
| 57 | bool, unsigned); | ||
| 58 | |||
| 59 | /* | ||
| 60 | * bool | ||
| 61 | * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, | ||
| 62 | * void *addr_b, size_t size_b, bool committed, unsigned arena_ind); | ||
| 63 | */ | ||
| 64 | typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t, | ||
| 65 | bool, unsigned); | ||
| 66 | |||
| 67 | struct extent_hooks_s { | ||
| 68 | extent_alloc_t *alloc; | ||
| 69 | extent_dalloc_t *dalloc; | ||
| 70 | extent_destroy_t *destroy; | ||
| 71 | extent_commit_t *commit; | ||
| 72 | extent_decommit_t *decommit; | ||
| 73 | extent_purge_t *purge_lazy; | ||
| 74 | extent_purge_t *purge_forced; | ||
| 75 | extent_split_t *split; | ||
| 76 | extent_merge_t *merge; | ||
| 77 | }; | ||
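The extent_hooks_t vtable above lets an application take over, or wrap, how an arena obtains and returns virtual memory. A hedged sketch under these assumptions: the public jemalloc.h generated from these headers is available, the "arena.<i>.extent_hooks" mallctl is used to read and install hooks, and error handling is minimal. It fetches arena 0's current hooks, then re-installs a copy whose alloc slot logs each extent allocation before delegating to the original hook.

```c
#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static extent_hooks_t *orig_hooks;   /* hooks being wrapped */
static extent_hooks_t logging_hooks; /* filled in main() */

static void *
logging_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
    void *ret = orig_hooks->alloc(orig_hooks, new_addr, size, alignment, zero,
        commit, arena_ind);
    fprintf(stderr, "extent_alloc(%zu bytes) -> %p (arena %u)\n", size, ret,
        arena_ind);
    return ret;
}

int
main(void) {
    size_t sz = sizeof(orig_hooks);
    /* Read arena 0's current hooks, then write back a copy that logs. */
    if (mallctl("arena.0.extent_hooks", (void *)&orig_hooks, &sz, NULL, 0)) {
        return 1;
    }
    logging_hooks = *orig_hooks;
    logging_hooks.alloc = logging_alloc;
    extent_hooks_t *new_hooks = &logging_hooks;
    if (mallctl("arena.0.extent_hooks", NULL, NULL, (void *)&new_hooks,
        sizeof(new_hooks))) {
        return 1;
    }

    void *p = mallocx(4 * 1024 * 1024, 0);  /* large enough to need an extent */
    dallocx(p, 0);
    return 0;
}
```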
diff --git a/examples/redis-unstable/deps/jemalloc/include/msvc_compat/C99/stdbool.h b/examples/redis-unstable/deps/jemalloc/include/msvc_compat/C99/stdbool.h deleted file mode 100644 index d92160e..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/msvc_compat/C99/stdbool.h +++ /dev/null | |||
| @@ -1,20 +0,0 @@ | |||
| 1 | #ifndef stdbool_h | ||
| 2 | #define stdbool_h | ||
| 3 | |||
| 4 | #include <wtypes.h> | ||
| 5 | |||
| 6 | /* MSVC doesn't define _Bool or bool in C, but does have BOOL */ | ||
| 7 | /* Note this doesn't pass autoconf's test because (bool) 0.5 != true */ | ||
| 8 | /* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as | ||
| 9 | * a built-in type. */ | ||
| 10 | #ifndef __clang__ | ||
| 11 | typedef BOOL _Bool; | ||
| 12 | #endif | ||
| 13 | |||
| 14 | #define bool _Bool | ||
| 15 | #define true 1 | ||
| 16 | #define false 0 | ||
| 17 | |||
| 18 | #define __bool_true_false_are_defined 1 | ||
| 19 | |||
| 20 | #endif /* stdbool_h */ | ||

diff --git a/examples/redis-unstable/deps/jemalloc/include/msvc_compat/C99/stdint.h b/examples/redis-unstable/deps/jemalloc/include/msvc_compat/C99/stdint.h deleted file mode 100644 index d02608a..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/msvc_compat/C99/stdint.h +++ /dev/null | |||
| @@ -1,247 +0,0 @@ | |||
| 1 | // ISO C9x compliant stdint.h for Microsoft Visual Studio | ||
| 2 | // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 | ||
| 3 | // | ||
| 4 | // Copyright (c) 2006-2008 Alexander Chemeris | ||
| 5 | // | ||
| 6 | // Redistribution and use in source and binary forms, with or without | ||
| 7 | // modification, are permitted provided that the following conditions are met: | ||
| 8 | // | ||
| 9 | // 1. Redistributions of source code must retain the above copyright notice, | ||
| 10 | // this list of conditions and the following disclaimer. | ||
| 11 | // | ||
| 12 | // 2. Redistributions in binary form must reproduce the above copyright | ||
| 13 | // notice, this list of conditions and the following disclaimer in the | ||
| 14 | // documentation and/or other materials provided with the distribution. | ||
| 15 | // | ||
| 16 | // 3. The name of the author may be used to endorse or promote products | ||
| 17 | // derived from this software without specific prior written permission. | ||
| 18 | // | ||
| 19 | // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
| 20 | // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 21 | // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO | ||
| 22 | // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 23 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | ||
| 24 | // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; | ||
| 25 | // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
| 26 | // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR | ||
| 27 | // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF | ||
| 28 | // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 29 | // | ||
| 30 | /////////////////////////////////////////////////////////////////////////////// | ||
| 31 | |||
| 32 | #ifndef _MSC_VER // [ | ||
| 33 | #error "Use this header only with Microsoft Visual C++ compilers!" | ||
| 34 | #endif // _MSC_VER ] | ||
| 35 | |||
| 36 | #ifndef _MSC_STDINT_H_ // [ | ||
| 37 | #define _MSC_STDINT_H_ | ||
| 38 | |||
| 39 | #if _MSC_VER > 1000 | ||
| 40 | #pragma once | ||
| 41 | #endif | ||
| 42 | |||
| 43 | #include <limits.h> | ||
| 44 | |||
| 45 | // For Visual Studio 6 in C++ mode and for many Visual Studio versions when | ||
| 46 | // compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}' | ||
| 47 | // or the compiler gives many errors like this: | ||
| 48 | // error C2733: second C linkage of overloaded function 'wmemchr' not allowed | ||
| 49 | #ifdef __cplusplus | ||
| 50 | extern "C" { | ||
| 51 | #endif | ||
| 52 | # include <wchar.h> | ||
| 53 | #ifdef __cplusplus | ||
| 54 | } | ||
| 55 | #endif | ||
| 56 | |||
| 57 | // Define _W64 macros to mark types changing their size, like intptr_t. | ||
| 58 | #ifndef _W64 | ||
| 59 | # if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 | ||
| 60 | # define _W64 __w64 | ||
| 61 | # else | ||
| 62 | # define _W64 | ||
| 63 | # endif | ||
| 64 | #endif | ||
| 65 | |||
| 66 | |||
| 67 | // 7.18.1 Integer types | ||
| 68 | |||
| 69 | // 7.18.1.1 Exact-width integer types | ||
| 70 | |||
| 71 | // Visual Studio 6 and Embedded Visual C++ 4 don't | ||
| 72 | // realize that, e.g. char has the same size as __int8 | ||
| 73 | // so we give up on __intX for them. | ||
| 74 | #if (_MSC_VER < 1300) | ||
| 75 | typedef signed char int8_t; | ||
| 76 | typedef signed short int16_t; | ||
| 77 | typedef signed int int32_t; | ||
| 78 | typedef unsigned char uint8_t; | ||
| 79 | typedef unsigned short uint16_t; | ||
| 80 | typedef unsigned int uint32_t; | ||
| 81 | #else | ||
| 82 | typedef signed __int8 int8_t; | ||
| 83 | typedef signed __int16 int16_t; | ||
| 84 | typedef signed __int32 int32_t; | ||
| 85 | typedef unsigned __int8 uint8_t; | ||
| 86 | typedef unsigned __int16 uint16_t; | ||
| 87 | typedef unsigned __int32 uint32_t; | ||
| 88 | #endif | ||
| 89 | typedef signed __int64 int64_t; | ||
| 90 | typedef unsigned __int64 uint64_t; | ||
| 91 | |||
| 92 | |||
| 93 | // 7.18.1.2 Minimum-width integer types | ||
| 94 | typedef int8_t int_least8_t; | ||
| 95 | typedef int16_t int_least16_t; | ||
| 96 | typedef int32_t int_least32_t; | ||
| 97 | typedef int64_t int_least64_t; | ||
| 98 | typedef uint8_t uint_least8_t; | ||
| 99 | typedef uint16_t uint_least16_t; | ||
| 100 | typedef uint32_t uint_least32_t; | ||
| 101 | typedef uint64_t uint_least64_t; | ||
| 102 | |||
| 103 | // 7.18.1.3 Fastest minimum-width integer types | ||
| 104 | typedef int8_t int_fast8_t; | ||
| 105 | typedef int16_t int_fast16_t; | ||
| 106 | typedef int32_t int_fast32_t; | ||
| 107 | typedef int64_t int_fast64_t; | ||
| 108 | typedef uint8_t uint_fast8_t; | ||
| 109 | typedef uint16_t uint_fast16_t; | ||
| 110 | typedef uint32_t uint_fast32_t; | ||
| 111 | typedef uint64_t uint_fast64_t; | ||
| 112 | |||
| 113 | // 7.18.1.4 Integer types capable of holding object pointers | ||
| 114 | #ifdef _WIN64 // [ | ||
| 115 | typedef signed __int64 intptr_t; | ||
| 116 | typedef unsigned __int64 uintptr_t; | ||
| 117 | #else // _WIN64 ][ | ||
| 118 | typedef _W64 signed int intptr_t; | ||
| 119 | typedef _W64 unsigned int uintptr_t; | ||
| 120 | #endif // _WIN64 ] | ||
| 121 | |||
| 122 | // 7.18.1.5 Greatest-width integer types | ||
| 123 | typedef int64_t intmax_t; | ||
| 124 | typedef uint64_t uintmax_t; | ||
| 125 | |||
| 126 | |||
| 127 | // 7.18.2 Limits of specified-width integer types | ||
| 128 | |||
| 129 | #if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 | ||
| 130 | |||
| 131 | // 7.18.2.1 Limits of exact-width integer types | ||
| 132 | #define INT8_MIN ((int8_t)_I8_MIN) | ||
| 133 | #define INT8_MAX _I8_MAX | ||
| 134 | #define INT16_MIN ((int16_t)_I16_MIN) | ||
| 135 | #define INT16_MAX _I16_MAX | ||
| 136 | #define INT32_MIN ((int32_t)_I32_MIN) | ||
| 137 | #define INT32_MAX _I32_MAX | ||
| 138 | #define INT64_MIN ((int64_t)_I64_MIN) | ||
| 139 | #define INT64_MAX _I64_MAX | ||
| 140 | #define UINT8_MAX _UI8_MAX | ||
| 141 | #define UINT16_MAX _UI16_MAX | ||
| 142 | #define UINT32_MAX _UI32_MAX | ||
| 143 | #define UINT64_MAX _UI64_MAX | ||
| 144 | |||
| 145 | // 7.18.2.2 Limits of minimum-width integer types | ||
| 146 | #define INT_LEAST8_MIN INT8_MIN | ||
| 147 | #define INT_LEAST8_MAX INT8_MAX | ||
| 148 | #define INT_LEAST16_MIN INT16_MIN | ||
| 149 | #define INT_LEAST16_MAX INT16_MAX | ||
| 150 | #define INT_LEAST32_MIN INT32_MIN | ||
| 151 | #define INT_LEAST32_MAX INT32_MAX | ||
| 152 | #define INT_LEAST64_MIN INT64_MIN | ||
| 153 | #define INT_LEAST64_MAX INT64_MAX | ||
| 154 | #define UINT_LEAST8_MAX UINT8_MAX | ||
| 155 | #define UINT_LEAST16_MAX UINT16_MAX | ||
| 156 | #define UINT_LEAST32_MAX UINT32_MAX | ||
| 157 | #define UINT_LEAST64_MAX UINT64_MAX | ||
| 158 | |||
| 159 | // 7.18.2.3 Limits of fastest minimum-width integer types | ||
| 160 | #define INT_FAST8_MIN INT8_MIN | ||
| 161 | #define INT_FAST8_MAX INT8_MAX | ||
| 162 | #define INT_FAST16_MIN INT16_MIN | ||
| 163 | #define INT_FAST16_MAX INT16_MAX | ||
| 164 | #define INT_FAST32_MIN INT32_MIN | ||
| 165 | #define INT_FAST32_MAX INT32_MAX | ||
| 166 | #define INT_FAST64_MIN INT64_MIN | ||
| 167 | #define INT_FAST64_MAX INT64_MAX | ||
| 168 | #define UINT_FAST8_MAX UINT8_MAX | ||
| 169 | #define UINT_FAST16_MAX UINT16_MAX | ||
| 170 | #define UINT_FAST32_MAX UINT32_MAX | ||
| 171 | #define UINT_FAST64_MAX UINT64_MAX | ||
| 172 | |||
| 173 | // 7.18.2.4 Limits of integer types capable of holding object pointers | ||
| 174 | #ifdef _WIN64 // [ | ||
| 175 | # define INTPTR_MIN INT64_MIN | ||
| 176 | # define INTPTR_MAX INT64_MAX | ||
| 177 | # define UINTPTR_MAX UINT64_MAX | ||
| 178 | #else // _WIN64 ][ | ||
| 179 | # define INTPTR_MIN INT32_MIN | ||
| 180 | # define INTPTR_MAX INT32_MAX | ||
| 181 | # define UINTPTR_MAX UINT32_MAX | ||
| 182 | #endif // _WIN64 ] | ||
| 183 | |||
| 184 | // 7.18.2.5 Limits of greatest-width integer types | ||
| 185 | #define INTMAX_MIN INT64_MIN | ||
| 186 | #define INTMAX_MAX INT64_MAX | ||
| 187 | #define UINTMAX_MAX UINT64_MAX | ||
| 188 | |||
| 189 | // 7.18.3 Limits of other integer types | ||
| 190 | |||
| 191 | #ifdef _WIN64 // [ | ||
| 192 | # define PTRDIFF_MIN _I64_MIN | ||
| 193 | # define PTRDIFF_MAX _I64_MAX | ||
| 194 | #else // _WIN64 ][ | ||
| 195 | # define PTRDIFF_MIN _I32_MIN | ||
| 196 | # define PTRDIFF_MAX _I32_MAX | ||
| 197 | #endif // _WIN64 ] | ||
| 198 | |||
| 199 | #define SIG_ATOMIC_MIN INT_MIN | ||
| 200 | #define SIG_ATOMIC_MAX INT_MAX | ||
| 201 | |||
| 202 | #ifndef SIZE_MAX // [ | ||
| 203 | # ifdef _WIN64 // [ | ||
| 204 | # define SIZE_MAX _UI64_MAX | ||
| 205 | # else // _WIN64 ][ | ||
| 206 | # define SIZE_MAX _UI32_MAX | ||
| 207 | # endif // _WIN64 ] | ||
| 208 | #endif // SIZE_MAX ] | ||
| 209 | |||
| 210 | // WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h> | ||
| 211 | #ifndef WCHAR_MIN // [ | ||
| 212 | # define WCHAR_MIN 0 | ||
| 213 | #endif // WCHAR_MIN ] | ||
| 214 | #ifndef WCHAR_MAX // [ | ||
| 215 | # define WCHAR_MAX _UI16_MAX | ||
| 216 | #endif // WCHAR_MAX ] | ||
| 217 | |||
| 218 | #define WINT_MIN 0 | ||
| 219 | #define WINT_MAX _UI16_MAX | ||
| 220 | |||
| 221 | #endif // __STDC_LIMIT_MACROS ] | ||
| 222 | |||
| 223 | |||
| 224 | // 7.18.4 Limits of other integer types | ||
| 225 | |||
| 226 | #if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 | ||
| 227 | |||
| 228 | // 7.18.4.1 Macros for minimum-width integer constants | ||
| 229 | |||
| 230 | #define INT8_C(val) val##i8 | ||
| 231 | #define INT16_C(val) val##i16 | ||
| 232 | #define INT32_C(val) val##i32 | ||
| 233 | #define INT64_C(val) val##i64 | ||
| 234 | |||
| 235 | #define UINT8_C(val) val##ui8 | ||
| 236 | #define UINT16_C(val) val##ui16 | ||
| 237 | #define UINT32_C(val) val##ui32 | ||
| 238 | #define UINT64_C(val) val##ui64 | ||
| 239 | |||
| 240 | // 7.18.4.2 Macros for greatest-width integer constants | ||
| 241 | #define INTMAX_C INT64_C | ||
| 242 | #define UINTMAX_C UINT64_C | ||
| 243 | |||
| 244 | #endif // __STDC_CONSTANT_MACROS ] | ||
| 245 | |||
| 246 | |||
| 247 | #endif // _MSC_STDINT_H_ ] | ||
diff --git a/examples/redis-unstable/deps/jemalloc/include/msvc_compat/strings.h b/examples/redis-unstable/deps/jemalloc/include/msvc_compat/strings.h deleted file mode 100644 index 996f256..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/msvc_compat/strings.h +++ /dev/null | |||
| @@ -1,58 +0,0 @@ | |||
| 1 | #ifndef strings_h | ||
| 2 | #define strings_h | ||
| 3 | |||
| 4 | /* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided | ||
| 5 | * for both */ | ||
| 6 | #ifdef _MSC_VER | ||
| 7 | # include <intrin.h> | ||
| 8 | # pragma intrinsic(_BitScanForward) | ||
| 9 | static __forceinline int ffsl(long x) { | ||
| 10 | unsigned long i; | ||
| 11 | |||
| 12 | if (_BitScanForward(&i, x)) { | ||
| 13 | return i + 1; | ||
| 14 | } | ||
| 15 | return 0; | ||
| 16 | } | ||
| 17 | |||
| 18 | static __forceinline int ffs(int x) { | ||
| 19 | return ffsl(x); | ||
| 20 | } | ||
| 21 | |||
| 22 | # ifdef _M_X64 | ||
| 23 | # pragma intrinsic(_BitScanForward64) | ||
| 24 | # endif | ||
| 25 | |||
| 26 | static __forceinline int ffsll(unsigned __int64 x) { | ||
| 27 | unsigned long i; | ||
| 28 | #ifdef _M_X64 | ||
| 29 | if (_BitScanForward64(&i, x)) { | ||
| 30 | return i + 1; | ||
| 31 | } | ||
| 32 | return 0; | ||
| 33 | #else | ||
| 34 | // Fallback for 32-bit build where 64-bit version not available | ||
| 35 | // assuming little endian | ||
| 36 | union { | ||
| 37 | unsigned __int64 ll; | ||
| 38 | unsigned long l[2]; | ||
| 39 | } s; | ||
| 40 | |||
| 41 | s.ll = x; | ||
| 42 | |||
| 43 | if (_BitScanForward(&i, s.l[0])) { | ||
| 44 | return i + 1; | ||
| 45 | } else if (_BitScanForward(&i, s.l[1])) { | ||
| 46 | return i + 33; | ||
| 47 | } | ||
| 48 | return 0; | ||
| 49 | #endif | ||
| 50 | } | ||
| 51 | |||
| 52 | #else | ||
| 53 | # define ffsll(x) __builtin_ffsll(x) | ||
| 54 | # define ffsl(x) __builtin_ffsl(x) | ||
| 55 | # define ffs(x) __builtin_ffs(x) | ||
| 56 | #endif | ||
| 57 | |||
| 58 | #endif /* strings_h */ | ||
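The shim above maps ffs/ffsl/ffsll onto the _BitScanForward intrinsics, with a two-word fallback for 32-bit builds. As a cross-check of the intended semantics (1-based index of the least-significant set bit, 0 for a zero argument), here is a plain-C reference version with a few asserts; it is an illustration, not the intrinsic-based implementation.

```c
#include <assert.h>
#include <stdint.h>

static int
ffsll_ref(uint64_t x) {
    if (x == 0) {
        return 0;
    }
    int i = 1;
    while ((x & 1) == 0) {   /* shift until the lowest set bit reaches bit 0 */
        x >>= 1;
        i++;
    }
    return i;
}

int
main(void) {
    assert(ffsll_ref(0) == 0);
    assert(ffsll_ref(1) == 1);
    assert(ffsll_ref(0x8000000000000000ull) == 64);
    /* Matches the 32-bit fallback path: a bit in the high word maps to 33..64. */
    assert(ffsll_ref((uint64_t)1 << 32) == 33);
    return 0;
}
```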
diff --git a/examples/redis-unstable/deps/jemalloc/include/msvc_compat/windows_extra.h b/examples/redis-unstable/deps/jemalloc/include/msvc_compat/windows_extra.h deleted file mode 100644 index a6ebb93..0000000 --- a/examples/redis-unstable/deps/jemalloc/include/msvc_compat/windows_extra.h +++ /dev/null | |||
| @@ -1,6 +0,0 @@ | |||
| 1 | #ifndef MSVC_COMPAT_WINDOWS_EXTRA_H | ||
| 2 | #define MSVC_COMPAT_WINDOWS_EXTRA_H | ||
| 3 | |||
| 4 | #include <errno.h> | ||
| 5 | |||
| 6 | #endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */ | ||
