Diffstat (limited to 'examples/redis-unstable/deps/jemalloc/src/jemalloc.c')
-rw-r--r--	examples/redis-unstable/deps/jemalloc/src/jemalloc.c	4539
1 file changed, 0 insertions(+), 4539 deletions(-)
diff --git a/examples/redis-unstable/deps/jemalloc/src/jemalloc.c b/examples/redis-unstable/deps/jemalloc/src/jemalloc.c
deleted file mode 100644
index 9a115f8..0000000
--- a/examples/redis-unstable/deps/jemalloc/src/jemalloc.c
+++ /dev/null
@@ -1,4539 +0,0 @@
| 1 | #define JEMALLOC_C_ | ||
| 2 | #include "jemalloc/internal/jemalloc_preamble.h" | ||
| 3 | #include "jemalloc/internal/jemalloc_internal_includes.h" | ||
| 4 | |||
| 5 | #include "jemalloc/internal/assert.h" | ||
| 6 | #include "jemalloc/internal/atomic.h" | ||
| 7 | #include "jemalloc/internal/buf_writer.h" | ||
| 8 | #include "jemalloc/internal/ctl.h" | ||
| 9 | #include "jemalloc/internal/emap.h" | ||
| 10 | #include "jemalloc/internal/extent_dss.h" | ||
| 11 | #include "jemalloc/internal/extent_mmap.h" | ||
| 12 | #include "jemalloc/internal/fxp.h" | ||
| 13 | #include "jemalloc/internal/san.h" | ||
| 14 | #include "jemalloc/internal/hook.h" | ||
| 15 | #include "jemalloc/internal/jemalloc_internal_types.h" | ||
| 16 | #include "jemalloc/internal/log.h" | ||
| 17 | #include "jemalloc/internal/malloc_io.h" | ||
| 18 | #include "jemalloc/internal/mutex.h" | ||
| 19 | #include "jemalloc/internal/nstime.h" | ||
| 20 | #include "jemalloc/internal/rtree.h" | ||
| 21 | #include "jemalloc/internal/safety_check.h" | ||
| 22 | #include "jemalloc/internal/sc.h" | ||
| 23 | #include "jemalloc/internal/spin.h" | ||
| 24 | #include "jemalloc/internal/sz.h" | ||
| 25 | #include "jemalloc/internal/ticker.h" | ||
| 26 | #include "jemalloc/internal/thread_event.h" | ||
| 27 | #include "jemalloc/internal/util.h" | ||
| 28 | |||
| 29 | /******************************************************************************/ | ||
| 30 | /* Data. */ | ||
| 31 | |||
| 32 | /* Runtime configuration options. */ | ||
| 33 | const char *je_malloc_conf | ||
| 34 | #ifndef _WIN32 | ||
| 35 | JEMALLOC_ATTR(weak) | ||
| 36 | #endif | ||
| 37 | ; | ||
| 38 | /* | ||
| 39 | * The usual rule is that the closer to runtime you are, the higher priority | ||
| 40 | * your configuration settings are (so the jemalloc config options get lower | ||
| 41 | * priority than the per-binary setting, which gets lower priority than the /etc | ||
| 42 | * setting, which gets lower priority than the environment settings). | ||
| 43 | * | ||
| 44 | * But it's a fairly common use case in some testing environments for a user to | ||
| 45 | * be able to control the binary, but nothing else (e.g. a performance canary | ||
| 46 | * uses the production OS and environment variables, but can run any binary in | ||
| 47 | * those circumstances). For these use cases, it's handy to have an in-binary | ||
| 48 | * mechanism for overriding environment variable settings, with the idea that if | ||
| 49 | * the results are positive they get promoted to the official settings, and | ||
| 50 | * moved from the binary to the environment variable. | ||
| 51 | * | ||
| 52 | * We don't actually want this to be widespread, so we'll give it a silly name | ||
| 53 | * and not mention it in headers or documentation. | ||
| 54 | */ | ||
| 55 | const char *je_malloc_conf_2_conf_harder | ||
| 56 | #ifndef _WIN32 | ||
| 57 | JEMALLOC_ATTR(weak) | ||
| 58 | #endif | ||
| 59 | ; | ||
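/*
 * Illustrative sketch (option values are made up): because both symbols
 * above are weak on non-Windows targets, a binary can pin its own option
 * strings simply by defining them:
 *
 *   const char *je_malloc_conf = "narenas:2,tcache:false";
 *   const char *je_malloc_conf_2_conf_harder = "narenas:8";
 *
 * je_malloc_conf is read before the /etc and environment sources, so those
 * can still override it; je_malloc_conf_2_conf_harder is read last and
 * therefore wins over the environment -- the canary use case described
 * above.
 */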
| 60 | |||
| 61 | bool opt_abort = | ||
| 62 | #ifdef JEMALLOC_DEBUG | ||
| 63 | true | ||
| 64 | #else | ||
| 65 | false | ||
| 66 | #endif | ||
| 67 | ; | ||
| 68 | bool opt_abort_conf = | ||
| 69 | #ifdef JEMALLOC_DEBUG | ||
| 70 | true | ||
| 71 | #else | ||
| 72 | false | ||
| 73 | #endif | ||
| 74 | ; | ||
| 75 | /* Intentionally default off, even with debug builds. */ | ||
| 76 | bool opt_confirm_conf = false; | ||
| 77 | const char *opt_junk = | ||
| 78 | #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) | ||
| 79 | "true" | ||
| 80 | #else | ||
| 81 | "false" | ||
| 82 | #endif | ||
| 83 | ; | ||
| 84 | bool opt_junk_alloc = | ||
| 85 | #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) | ||
| 86 | true | ||
| 87 | #else | ||
| 88 | false | ||
| 89 | #endif | ||
| 90 | ; | ||
| 91 | bool opt_junk_free = | ||
| 92 | #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) | ||
| 93 | true | ||
| 94 | #else | ||
| 95 | false | ||
| 96 | #endif | ||
| 97 | ; | ||
| 98 | bool opt_trust_madvise = | ||
| 99 | #ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS | ||
| 100 | false | ||
| 101 | #else | ||
| 102 | true | ||
| 103 | #endif | ||
| 104 | ; | ||
| 105 | |||
| 106 | bool opt_cache_oblivious = | ||
| 107 | #ifdef JEMALLOC_CACHE_OBLIVIOUS | ||
| 108 | true | ||
| 109 | #else | ||
| 110 | false | ||
| 111 | #endif | ||
| 112 | ; | ||
| 113 | |||
| 114 | zero_realloc_action_t opt_zero_realloc_action = | ||
| 115 | #ifdef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE | ||
| 116 | zero_realloc_action_free | ||
| 117 | #else | ||
| 118 | zero_realloc_action_alloc | ||
| 119 | #endif | ||
| 120 | ; | ||
| 121 | |||
| 122 | atomic_zu_t zero_realloc_count = ATOMIC_INIT(0); | ||
| 123 | |||
| 124 | const char *zero_realloc_mode_names[] = { | ||
| 125 | "alloc", | ||
| 126 | "free", | ||
| 127 | "abort", | ||
| 128 | }; | ||
| 129 | |||
| 130 | /* | ||
| 131 | * These are the documented values for junk fill debugging facilities -- see the | ||
| 132 | * man page. | ||
| 133 | */ | ||
| 134 | static const uint8_t junk_alloc_byte = 0xa5; | ||
| 135 | static const uint8_t junk_free_byte = 0x5a; | ||
| 136 | |||
| 137 | static void default_junk_alloc(void *ptr, size_t usize) { | ||
| 138 | memset(ptr, junk_alloc_byte, usize); | ||
| 139 | } | ||
| 140 | |||
| 141 | static void default_junk_free(void *ptr, size_t usize) { | ||
| 142 | memset(ptr, junk_free_byte, usize); | ||
| 143 | } | ||
| 144 | |||
| 145 | void (*junk_alloc_callback)(void *ptr, size_t size) = &default_junk_alloc; | ||
| 146 | void (*junk_free_callback)(void *ptr, size_t size) = &default_junk_free; | ||
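/*
 * A minimal sketch of swapping in a custom junk filler through the mutable
 * callbacks above; poison_byte, my_junk_alloc, and install_junk_hook are
 * illustrative names, and this assumes the caller links against these
 * internal symbols:
 */
#if 0
static const uint8_t poison_byte = 0xde;

static void
my_junk_alloc(void *ptr, size_t usize) {
	/* Same contract as default_junk_alloc: fill exactly usize bytes. */
	memset(ptr, poison_byte, usize);
}

static void
install_junk_hook(void) {
	junk_alloc_callback = &my_junk_alloc;
}
#endif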
| 147 | |||
| 148 | bool opt_utrace = false; | ||
| 149 | bool opt_xmalloc = false; | ||
| 150 | bool opt_experimental_infallible_new = false; | ||
| 151 | bool opt_zero = false; | ||
| 152 | unsigned opt_narenas = 0; | ||
| 153 | fxp_t opt_narenas_ratio = FXP_INIT_INT(4); | ||
| 154 | |||
| 155 | unsigned ncpus; | ||
| 156 | |||
| 157 | /* Protects arenas initialization. */ | ||
| 158 | malloc_mutex_t arenas_lock; | ||
| 159 | |||
| 160 | /* The global hpa, and whether it's on. */ | ||
| 161 | bool opt_hpa = false; | ||
| 162 | hpa_shard_opts_t opt_hpa_opts = HPA_SHARD_OPTS_DEFAULT; | ||
| 163 | sec_opts_t opt_hpa_sec_opts = SEC_OPTS_DEFAULT; | ||
| 164 | |||
| 165 | /* | ||
| 166 | * Arenas that are used to service external requests. Not all elements of the | ||
| 167 | * arenas array are necessarily used; arenas are created lazily as needed. | ||
| 168 | * | ||
| 169 | * arenas[0..narenas_auto) are used for automatic multiplexing of threads and | ||
| 170 | * arenas. arenas[narenas_auto..narenas_total) are only used if the application | ||
| 171 | * takes some action to create them and allocate from them. | ||
| 172 | * | ||
| 173 | * Points to an arena_t. | ||
| 174 | */ | ||
| 175 | JEMALLOC_ALIGNED(CACHELINE) | ||
| 176 | atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; | ||
| 177 | static atomic_u_t narenas_total; /* Use narenas_total_*(). */ | ||
| 178 | /* Below three are read-only after initialization. */ | ||
| 179 | static arena_t *a0; /* arenas[0]. */ | ||
| 180 | unsigned narenas_auto; | ||
| 181 | unsigned manual_arena_base; | ||
| 182 | |||
| 183 | malloc_init_t malloc_init_state = malloc_init_uninitialized; | ||
| 184 | |||
| 185 | /* False should be the common case. Set to true to trigger initialization. */ | ||
| 186 | bool malloc_slow = true; | ||
| 187 | |||
| 188 | /* When malloc_slow is true, set the corresponding bits for sanity check. */ | ||
| 189 | enum { | ||
| 190 | flag_opt_junk_alloc = (1U), | ||
| 191 | flag_opt_junk_free = (1U << 1), | ||
| 192 | flag_opt_zero = (1U << 2), | ||
| 193 | flag_opt_utrace = (1U << 3), | ||
| 194 | flag_opt_xmalloc = (1U << 4) | ||
| 195 | }; | ||
| 196 | static uint8_t malloc_slow_flags; | ||
| 197 | |||
| 198 | #ifdef JEMALLOC_THREADED_INIT | ||
| 199 | /* Used to let the initializing thread recursively allocate. */ | ||
| 200 | # define NO_INITIALIZER ((unsigned long)0) | ||
| 201 | # define INITIALIZER pthread_self() | ||
| 202 | # define IS_INITIALIZER (malloc_initializer == pthread_self()) | ||
| 203 | static pthread_t malloc_initializer = NO_INITIALIZER; | ||
| 204 | #else | ||
| 205 | # define NO_INITIALIZER false | ||
| 206 | # define INITIALIZER true | ||
| 207 | # define IS_INITIALIZER malloc_initializer | ||
| 208 | static bool malloc_initializer = NO_INITIALIZER; | ||
| 209 | #endif | ||
| 210 | |||
| 211 | /* Used to avoid initialization races. */ | ||
| 212 | #ifdef _WIN32 | ||
| 213 | #if _WIN32_WINNT >= 0x0600 | ||
| 214 | static malloc_mutex_t init_lock = SRWLOCK_INIT; | ||
| 215 | #else | ||
| 216 | static malloc_mutex_t init_lock; | ||
| 217 | static bool init_lock_initialized = false; | ||
| 218 | |||
| 219 | JEMALLOC_ATTR(constructor) | ||
| 220 | static void WINAPI | ||
| 221 | _init_init_lock(void) { | ||
| 222 | /* | ||
| 223 | * If another constructor in the same binary is using mallctl to e.g. | ||
| 224 | * set up extent hooks, it may end up running before this one, and | ||
| 225 | * malloc_init_hard will crash trying to lock the uninitialized lock. So | ||
| 226 | * we force an initialization of the lock in malloc_init_hard as well. | ||
| 227 | * We don't try to care about atomicity of the accesses to the | ||
| 228 | * init_lock_initialized boolean, since it really only matters early in | ||
| 229 | * the process creation, before any separate thread normally starts | ||
| 230 | * doing anything. | ||
| 231 | */ | ||
| 232 | if (!init_lock_initialized) { | ||
| 233 | malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT, | ||
| 234 | malloc_mutex_rank_exclusive); | ||
| 235 | } | ||
| 236 | init_lock_initialized = true; | ||
| 237 | } | ||
| 238 | |||
| 239 | #ifdef _MSC_VER | ||
| 240 | # pragma section(".CRT$XCU", read) | ||
| 241 | JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) | ||
| 242 | static const void (WINAPI *init_init_lock)(void) = _init_init_lock; | ||
| 243 | #endif | ||
| 244 | #endif | ||
| 245 | #else | ||
| 246 | static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; | ||
| 247 | #endif | ||
| 248 | |||
| 249 | typedef struct { | ||
| 250 | void *p; /* Input pointer (as in realloc(p, s)). */ | ||
| 251 | size_t s; /* Request size. */ | ||
| 252 | void *r; /* Result pointer. */ | ||
| 253 | } malloc_utrace_t; | ||
| 254 | |||
| 255 | #ifdef JEMALLOC_UTRACE | ||
| 256 | # define UTRACE(a, b, c) do { \ | ||
| 257 | if (unlikely(opt_utrace)) { \ | ||
| 258 | int utrace_serrno = errno; \ | ||
| 259 | malloc_utrace_t ut; \ | ||
| 260 | ut.p = (a); \ | ||
| 261 | ut.s = (b); \ | ||
| 262 | ut.r = (c); \ | ||
| 263 | UTRACE_CALL(&ut, sizeof(ut)); \ | ||
| 264 | errno = utrace_serrno; \ | ||
| 265 | } \ | ||
| 266 | } while (0) | ||
| 267 | #else | ||
| 268 | # define UTRACE(a, b, c) | ||
| 269 | #endif | ||
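/*
 * Annotation, inferred from the field comments on malloc_utrace_t: the
 * records follow the classic utrace(2) convention (values illustrative):
 *
 *   UTRACE(NULL, 4096, ret);   malloc(4096) returned ret
 *   UTRACE(p, 8192, ret);      realloc(p, 8192) returned ret
 *   UTRACE(p, 0, NULL);        free(p)
 *
 * Note that errno is saved and restored around UTRACE_CALL, so tracing is
 * invisible to callers that inspect errno after a failed allocation.
 */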
| 270 | |||
| 271 | /* Whether encountered any invalid config options. */ | ||
| 272 | static bool had_conf_error = false; | ||
| 273 | |||
| 274 | /******************************************************************************/ | ||
| 275 | /* | ||
| 276 | * Function prototypes for static functions that are referenced prior to | ||
| 277 | * definition. | ||
| 278 | */ | ||
| 279 | |||
| 280 | static bool malloc_init_hard_a0(void); | ||
| 281 | static bool malloc_init_hard(void); | ||
| 282 | |||
| 283 | /******************************************************************************/ | ||
| 284 | /* | ||
| 285 | * Begin miscellaneous support functions. | ||
| 286 | */ | ||
| 287 | |||
| 288 | JEMALLOC_ALWAYS_INLINE bool | ||
| 289 | malloc_init_a0(void) { | ||
| 290 | if (unlikely(malloc_init_state == malloc_init_uninitialized)) { | ||
| 291 | return malloc_init_hard_a0(); | ||
| 292 | } | ||
| 293 | return false; | ||
| 294 | } | ||
| 295 | |||
| 296 | JEMALLOC_ALWAYS_INLINE bool | ||
| 297 | malloc_init(void) { | ||
| 298 | if (unlikely(!malloc_initialized()) && malloc_init_hard()) { | ||
| 299 | return true; | ||
| 300 | } | ||
| 301 | return false; | ||
| 302 | } | ||
| 303 | |||
| 304 | /* | ||
| 305 | * The a0*() functions are used instead of i{d,}alloc() in situations that | ||
| 306 | * cannot tolerate TLS variable access. | ||
| 307 | */ | ||
| 308 | |||
| 309 | static void * | ||
| 310 | a0ialloc(size_t size, bool zero, bool is_internal) { | ||
| 311 | if (unlikely(malloc_init_a0())) { | ||
| 312 | return NULL; | ||
| 313 | } | ||
| 314 | |||
| 315 | return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL, | ||
| 316 | is_internal, arena_get(TSDN_NULL, 0, true), true); | ||
| 317 | } | ||
| 318 | |||
| 319 | static void | ||
| 320 | a0idalloc(void *ptr, bool is_internal) { | ||
| 321 | idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true); | ||
| 322 | } | ||
| 323 | |||
| 324 | void * | ||
| 325 | a0malloc(size_t size) { | ||
| 326 | return a0ialloc(size, false, true); | ||
| 327 | } | ||
| 328 | |||
| 329 | void | ||
| 330 | a0dalloc(void *ptr) { | ||
| 331 | a0idalloc(ptr, true); | ||
| 332 | } | ||
| 333 | |||
| 334 | /* | ||
| 335 | * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive | ||
| 336 | * situations that cannot tolerate TLS variable access (TLS allocation and very | ||
| 337 | * early internal data structure initialization). | ||
| 338 | */ | ||
| 339 | |||
| 340 | void * | ||
| 341 | bootstrap_malloc(size_t size) { | ||
| 342 | if (unlikely(size == 0)) { | ||
| 343 | size = 1; | ||
| 344 | } | ||
| 345 | |||
| 346 | return a0ialloc(size, false, false); | ||
| 347 | } | ||
| 348 | |||
| 349 | void * | ||
| 350 | bootstrap_calloc(size_t num, size_t size) { | ||
| 351 | size_t num_size; | ||
| 352 | |||
| 353 | num_size = num * size; | ||
| 354 | if (unlikely(num_size == 0)) { | ||
| 355 | assert(num == 0 || size == 0); | ||
| 356 | num_size = 1; | ||
| 357 | } | ||
| 358 | |||
| 359 | return a0ialloc(num_size, true, false); | ||
| 360 | } | ||
| 361 | |||
| 362 | void | ||
| 363 | bootstrap_free(void *ptr) { | ||
| 364 | if (unlikely(ptr == NULL)) { | ||
| 365 | return; | ||
| 366 | } | ||
| 367 | |||
| 368 | a0idalloc(ptr, false); | ||
| 369 | } | ||
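/*
 * A minimal sketch of the intended bootstrap usage -- an early-startup
 * consumer (e.g. a libc TLS allocator) that must not itself touch TLS;
 * early_tls_block and tls_size are illustrative:
 */
#if 0
static void *
early_tls_block(size_t tls_size) {
	/* Zeroed allocation that is safe before TSD is initialized. */
	return bootstrap_calloc(1, tls_size);
}
#endif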
| 370 | |||
| 371 | void | ||
| 372 | arena_set(unsigned ind, arena_t *arena) { | ||
| 373 | atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE); | ||
| 374 | } | ||
| 375 | |||
| 376 | static void | ||
| 377 | narenas_total_set(unsigned narenas) { | ||
| 378 | atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE); | ||
| 379 | } | ||
| 380 | |||
| 381 | static void | ||
| 382 | narenas_total_inc(void) { | ||
| 383 | atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE); | ||
| 384 | } | ||
| 385 | |||
| 386 | unsigned | ||
| 387 | narenas_total_get(void) { | ||
| 388 | return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE); | ||
| 389 | } | ||
| 390 | |||
| 391 | /* Create a new arena and insert it into the arenas array at index ind. */ | ||
| 392 | static arena_t * | ||
| 393 | arena_init_locked(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) { | ||
| 394 | arena_t *arena; | ||
| 395 | |||
| 396 | assert(ind <= narenas_total_get()); | ||
| 397 | if (ind >= MALLOCX_ARENA_LIMIT) { | ||
| 398 | return NULL; | ||
| 399 | } | ||
| 400 | if (ind == narenas_total_get()) { | ||
| 401 | narenas_total_inc(); | ||
| 402 | } | ||
| 403 | |||
| 404 | /* | ||
| 405 | * Another thread may have already initialized arenas[ind] if it's an | ||
| 406 | * auto arena. | ||
| 407 | */ | ||
| 408 | arena = arena_get(tsdn, ind, false); | ||
| 409 | if (arena != NULL) { | ||
| 410 | assert(arena_is_auto(arena)); | ||
| 411 | return arena; | ||
| 412 | } | ||
| 413 | |||
| 414 | /* Actually initialize the arena. */ | ||
| 415 | arena = arena_new(tsdn, ind, config); | ||
| 416 | |||
| 417 | return arena; | ||
| 418 | } | ||
| 419 | |||
| 420 | static void | ||
| 421 | arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { | ||
| 422 | if (ind == 0) { | ||
| 423 | return; | ||
| 424 | } | ||
| 425 | /* | ||
| 426 | * Avoid creating a new background thread just for the huge arena, which | ||
| 427 | * purges eagerly by default. | ||
| 428 | */ | ||
| 429 | if (have_background_thread && !arena_is_huge(ind)) { | ||
| 430 | if (background_thread_create(tsdn_tsd(tsdn), ind)) { | ||
| 431 | malloc_printf("<jemalloc>: error in background thread " | ||
| 432 | "creation for arena %u. Abort.\n", ind); | ||
| 433 | abort(); | ||
| 434 | } | ||
| 435 | } | ||
| 436 | } | ||
| 437 | |||
| 438 | arena_t * | ||
| 439 | arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) { | ||
| 440 | arena_t *arena; | ||
| 441 | |||
| 442 | malloc_mutex_lock(tsdn, &arenas_lock); | ||
| 443 | arena = arena_init_locked(tsdn, ind, config); | ||
| 444 | malloc_mutex_unlock(tsdn, &arenas_lock); | ||
| 445 | |||
| 446 | arena_new_create_background_thread(tsdn, ind); | ||
| 447 | |||
| 448 | return arena; | ||
| 449 | } | ||
| 450 | |||
| 451 | static void | ||
| 452 | arena_bind(tsd_t *tsd, unsigned ind, bool internal) { | ||
| 453 | arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false); | ||
| 454 | arena_nthreads_inc(arena, internal); | ||
| 455 | |||
| 456 | if (internal) { | ||
| 457 | tsd_iarena_set(tsd, arena); | ||
| 458 | } else { | ||
| 459 | tsd_arena_set(tsd, arena); | ||
| 460 | unsigned shard = atomic_fetch_add_u(&arena->binshard_next, 1, | ||
| 461 | ATOMIC_RELAXED); | ||
| 462 | tsd_binshards_t *bins = tsd_binshardsp_get(tsd); | ||
| 463 | for (unsigned i = 0; i < SC_NBINS; i++) { | ||
| 464 | assert(bin_infos[i].n_shards > 0 && | ||
| 465 | bin_infos[i].n_shards <= BIN_SHARDS_MAX); | ||
| 466 | bins->binshard[i] = shard % bin_infos[i].n_shards; | ||
| 467 | } | ||
| 468 | } | ||
| 469 | } | ||
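/*
 * Worked example of the binshard assignment above: with
 * bin_infos[i].n_shards == 4, successive threads binding to this arena draw
 * shard values 0, 1, 2, 3, 0, ... from binshard_next, spreading threads
 * round-robin across the sharded bins.
 */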
| 470 | |||
| 471 | void | ||
| 472 | arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena) { | ||
| 473 | assert(oldarena != NULL); | ||
| 474 | assert(newarena != NULL); | ||
| 475 | |||
| 476 | arena_nthreads_dec(oldarena, false); | ||
| 477 | arena_nthreads_inc(newarena, false); | ||
| 478 | tsd_arena_set(tsd, newarena); | ||
| 479 | |||
| 480 | if (arena_nthreads_get(oldarena, false) == 0) { | ||
| 481 | /* Purge if the old arena has no associated threads anymore. */ | ||
| 482 | arena_decay(tsd_tsdn(tsd), oldarena, | ||
| 483 | /* is_background_thread */ false, /* all */ true); | ||
| 484 | } | ||
| 485 | } | ||
| 486 | |||
| 487 | static void | ||
| 488 | arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { | ||
| 489 | arena_t *arena; | ||
| 490 | |||
| 491 | arena = arena_get(tsd_tsdn(tsd), ind, false); | ||
| 492 | arena_nthreads_dec(arena, internal); | ||
| 493 | |||
| 494 | if (internal) { | ||
| 495 | tsd_iarena_set(tsd, NULL); | ||
| 496 | } else { | ||
| 497 | tsd_arena_set(tsd, NULL); | ||
| 498 | } | ||
| 499 | } | ||
| 500 | |||
| 501 | /* Slow path, called only by arena_choose(). */ | ||
| 502 | arena_t * | ||
| 503 | arena_choose_hard(tsd_t *tsd, bool internal) { | ||
| 504 | arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); | ||
| 505 | |||
| 506 | if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { | ||
| 507 | unsigned choose = percpu_arena_choose(); | ||
| 508 | ret = arena_get(tsd_tsdn(tsd), choose, true); | ||
| 509 | assert(ret != NULL); | ||
| 510 | arena_bind(tsd, arena_ind_get(ret), false); | ||
| 511 | arena_bind(tsd, arena_ind_get(ret), true); | ||
| 512 | |||
| 513 | return ret; | ||
| 514 | } | ||
| 515 | |||
| 516 | if (narenas_auto > 1) { | ||
| 517 | unsigned i, j, choose[2], first_null; | ||
| 518 | bool is_new_arena[2]; | ||
| 519 | |||
| 520 | /* | ||
| 521 | * Determine binding for both non-internal and internal | ||
| 522 | * allocation. | ||
| 523 | * | ||
| 524 | * choose[0]: For application allocation. | ||
| 525 | * choose[1]: For internal metadata allocation. | ||
| 526 | */ | ||
| 527 | |||
| 528 | for (j = 0; j < 2; j++) { | ||
| 529 | choose[j] = 0; | ||
| 530 | is_new_arena[j] = false; | ||
| 531 | } | ||
| 532 | |||
| 533 | first_null = narenas_auto; | ||
| 534 | malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); | ||
| 535 | assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); | ||
| 536 | for (i = 1; i < narenas_auto; i++) { | ||
| 537 | if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { | ||
| 538 | /* | ||
| 539 | * Choose the first arena that has the lowest | ||
| 540 | * number of threads assigned to it. | ||
| 541 | */ | ||
| 542 | for (j = 0; j < 2; j++) { | ||
| 543 | if (arena_nthreads_get(arena_get( | ||
| 544 | tsd_tsdn(tsd), i, false), !!j) < | ||
| 545 | arena_nthreads_get(arena_get( | ||
| 546 | tsd_tsdn(tsd), choose[j], false), | ||
| 547 | !!j)) { | ||
| 548 | choose[j] = i; | ||
| 549 | } | ||
| 550 | } | ||
| 551 | } else if (first_null == narenas_auto) { | ||
| 552 | /* | ||
| 553 | * Record the index of the first uninitialized | ||
| 554 | * arena, in case all extant arenas are in use. | ||
| 555 | * | ||
| 556 | * NB: It is possible for there to be | ||
| 557 | * discontinuities in terms of initialized | ||
| 558 | * versus uninitialized arenas, due to the | ||
| 559 | * "thread.arena" mallctl. | ||
| 560 | */ | ||
| 561 | first_null = i; | ||
| 562 | } | ||
| 563 | } | ||
| 564 | |||
| 565 | for (j = 0; j < 2; j++) { | ||
| 566 | if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), | ||
| 567 | choose[j], false), !!j) == 0 || first_null == | ||
| 568 | narenas_auto) { | ||
| 569 | /* | ||
| 570 | * Use an unloaded arena, or the least loaded | ||
| 571 | * arena if all arenas are already initialized. | ||
| 572 | */ | ||
| 573 | if (!!j == internal) { | ||
| 574 | ret = arena_get(tsd_tsdn(tsd), | ||
| 575 | choose[j], false); | ||
| 576 | } | ||
| 577 | } else { | ||
| 578 | arena_t *arena; | ||
| 579 | |||
| 580 | /* Initialize a new arena. */ | ||
| 581 | choose[j] = first_null; | ||
| 582 | arena = arena_init_locked(tsd_tsdn(tsd), | ||
| 583 | choose[j], &arena_config_default); | ||
| 584 | if (arena == NULL) { | ||
| 585 | malloc_mutex_unlock(tsd_tsdn(tsd), | ||
| 586 | &arenas_lock); | ||
| 587 | return NULL; | ||
| 588 | } | ||
| 589 | is_new_arena[j] = true; | ||
| 590 | if (!!j == internal) { | ||
| 591 | ret = arena; | ||
| 592 | } | ||
| 593 | } | ||
| 594 | arena_bind(tsd, choose[j], !!j); | ||
| 595 | } | ||
| 596 | malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); | ||
| 597 | |||
| 598 | for (j = 0; j < 2; j++) { | ||
| 599 | if (is_new_arena[j]) { | ||
| 600 | assert(choose[j] > 0); | ||
| 601 | arena_new_create_background_thread( | ||
| 602 | tsd_tsdn(tsd), choose[j]); | ||
| 603 | } | ||
| 604 | } | ||
| 605 | |||
| 606 | } else { | ||
| 607 | ret = arena_get(tsd_tsdn(tsd), 0, false); | ||
| 608 | arena_bind(tsd, 0, false); | ||
| 609 | arena_bind(tsd, 0, true); | ||
| 610 | } | ||
| 611 | |||
| 612 | return ret; | ||
| 613 | } | ||
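/*
 * Illustrative run of the selection logic above: with narenas_auto == 4 and
 * only arenas 0 and 1 initialized, the scan leaves choose[j] at the
 * least-threaded of {0, 1} and first_null == 2. If that candidate still has
 * threads bound, a fresh arena is created at index 2; only if the candidate
 * is completely idle (or no uninitialized slot remains) is it reused.
 */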
| 614 | |||
| 615 | void | ||
| 616 | iarena_cleanup(tsd_t *tsd) { | ||
| 617 | arena_t *iarena; | ||
| 618 | |||
| 619 | iarena = tsd_iarena_get(tsd); | ||
| 620 | if (iarena != NULL) { | ||
| 621 | arena_unbind(tsd, arena_ind_get(iarena), true); | ||
| 622 | } | ||
| 623 | } | ||
| 624 | |||
| 625 | void | ||
| 626 | arena_cleanup(tsd_t *tsd) { | ||
| 627 | arena_t *arena; | ||
| 628 | |||
| 629 | arena = tsd_arena_get(tsd); | ||
| 630 | if (arena != NULL) { | ||
| 631 | arena_unbind(tsd, arena_ind_get(arena), false); | ||
| 632 | } | ||
| 633 | } | ||
| 634 | |||
| 635 | static void | ||
| 636 | stats_print_atexit(void) { | ||
| 637 | if (config_stats) { | ||
| 638 | tsdn_t *tsdn; | ||
| 639 | unsigned narenas, i; | ||
| 640 | |||
| 641 | tsdn = tsdn_fetch(); | ||
| 642 | |||
| 643 | /* | ||
| 644 | * Merge stats from extant threads. This is racy, since | ||
| 645 | * individual threads do not lock when recording tcache stats | ||
| 646 | * events. As a consequence, the final stats may be slightly | ||
| 647 | * out of date by the time they are reported, if other threads | ||
| 648 | * continue to allocate. | ||
| 649 | */ | ||
| 650 | for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { | ||
| 651 | arena_t *arena = arena_get(tsdn, i, false); | ||
| 652 | if (arena != NULL) { | ||
| 653 | tcache_slow_t *tcache_slow; | ||
| 654 | |||
| 655 | malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); | ||
| 656 | ql_foreach(tcache_slow, &arena->tcache_ql, | ||
| 657 | link) { | ||
| 658 | tcache_stats_merge(tsdn, | ||
| 659 | tcache_slow->tcache, arena); | ||
| 660 | } | ||
| 661 | malloc_mutex_unlock(tsdn, | ||
| 662 | &arena->tcache_ql_mtx); | ||
| 663 | } | ||
| 664 | } | ||
| 665 | } | ||
| 666 | je_malloc_stats_print(NULL, NULL, opt_stats_print_opts); | ||
| 667 | } | ||
| 668 | |||
| 669 | /* | ||
| 670 | * Ensure that we don't hold any locks upon entry to or exit from allocator | ||
| 671 | * code (in a "broad" sense that doesn't count a reentrant allocation as an | ||
| 672 | * entrance or exit). | ||
| 673 | */ | ||
| 674 | JEMALLOC_ALWAYS_INLINE void | ||
| 675 | check_entry_exit_locking(tsdn_t *tsdn) { | ||
| 676 | if (!config_debug) { | ||
| 677 | return; | ||
| 678 | } | ||
| 679 | if (tsdn_null(tsdn)) { | ||
| 680 | return; | ||
| 681 | } | ||
| 682 | tsd_t *tsd = tsdn_tsd(tsdn); | ||
| 683 | /* | ||
| 684 | * It's possible we hold locks at entry/exit if we're in a nested | ||
| 685 | * allocation. | ||
| 686 | */ | ||
| 687 | int8_t reentrancy_level = tsd_reentrancy_level_get(tsd); | ||
| 688 | if (reentrancy_level != 0) { | ||
| 689 | return; | ||
| 690 | } | ||
| 691 | witness_assert_lockless(tsdn_witness_tsdp_get(tsdn)); | ||
| 692 | } | ||
| 693 | |||
| 694 | /* | ||
| 695 | * End miscellaneous support functions. | ||
| 696 | */ | ||
| 697 | /******************************************************************************/ | ||
| 698 | /* | ||
| 699 | * Begin initialization functions. | ||
| 700 | */ | ||
| 701 | |||
| 702 | static char * | ||
| 703 | jemalloc_secure_getenv(const char *name) { | ||
| 704 | #ifdef JEMALLOC_HAVE_SECURE_GETENV | ||
| 705 | return secure_getenv(name); | ||
| 706 | #else | ||
| 707 | # ifdef JEMALLOC_HAVE_ISSETUGID | ||
| 708 | if (issetugid() != 0) { | ||
| 709 | return NULL; | ||
| 710 | } | ||
| 711 | # endif | ||
| 712 | return getenv(name); | ||
| 713 | #endif | ||
| 714 | } | ||
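/*
 * Annotation: the net effect of the fallbacks above is that a setuid/setgid
 * process ignores its environment instead of trusting it. Illustrative
 * behavior:
 *
 *   setenv("MALLOC_CONF", "junk:true", 1);
 *   jemalloc_secure_getenv("MALLOC_CONF");
 *       -> "junk:true" in an ordinary process
 *       -> NULL when issetugid() != 0 (or under secure_getenv())
 */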
| 715 | |||
| 716 | static unsigned | ||
| 717 | malloc_ncpus(void) { | ||
| 718 | long result; | ||
| 719 | |||
| 720 | #ifdef _WIN32 | ||
| 721 | SYSTEM_INFO si; | ||
| 722 | GetSystemInfo(&si); | ||
| 723 | result = si.dwNumberOfProcessors; | ||
| 724 | #elif defined(CPU_COUNT) | ||
| 725 | /* | ||
| 726 | * glibc >= 2.6 has the CPU_COUNT macro. | ||
| 727 | * | ||
| 728 | * glibc's sysconf() uses isspace(). glibc allocates for the first time | ||
| 729 | * *before* setting up the isspace tables. Therefore we need a | ||
| 730 | * different method to get the number of CPUs. | ||
| 731 | * | ||
| 732 | * The getaffinity approach is also preferred when only a subset of CPUs | ||
| 733 | * is available, to avoid using more arenas than necessary. | ||
| 734 | */ | ||
| 735 | { | ||
| 736 | # if defined(__FreeBSD__) || defined(__DragonFly__) | ||
| 737 | cpuset_t set; | ||
| 738 | # else | ||
| 739 | cpu_set_t set; | ||
| 740 | # endif | ||
| 741 | # if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) | ||
| 742 | sched_getaffinity(0, sizeof(set), &set); | ||
| 743 | # else | ||
| 744 | pthread_getaffinity_np(pthread_self(), sizeof(set), &set); | ||
| 745 | # endif | ||
| 746 | result = CPU_COUNT(&set); | ||
| 747 | } | ||
| 748 | #else | ||
| 749 | result = sysconf(_SC_NPROCESSORS_ONLN); | ||
| 750 | #endif | ||
| 751 | return ((result == -1) ? 1 : (unsigned)result); | ||
| 752 | } | ||
| 753 | |||
| 754 | /* | ||
| 755 | * Ensure that the number of CPUs is deterministic, i.e. it is the same based on: | ||
| 756 | * - sched_getaffinity() | ||
| 757 | * - _SC_NPROCESSORS_ONLN | ||
| 758 | * - _SC_NPROCESSORS_CONF | ||
| 759 | * Otherwise tricky things are possible with percpu arenas in use. | ||
| 760 | */ | ||
| 761 | static bool | ||
| 762 | malloc_cpu_count_is_deterministic() | ||
| 763 | { | ||
| 764 | #ifdef _WIN32 | ||
| 765 | return true; | ||
| 766 | #else | ||
| 767 | long cpu_onln = sysconf(_SC_NPROCESSORS_ONLN); | ||
| 768 | long cpu_conf = sysconf(_SC_NPROCESSORS_CONF); | ||
| 769 | if (cpu_onln != cpu_conf) { | ||
| 770 | return false; | ||
| 771 | } | ||
| 772 | # if defined(CPU_COUNT) | ||
| 773 | # if defined(__FreeBSD__) || defined(__DragonFly__) | ||
| 774 | cpuset_t set; | ||
| 775 | # else | ||
| 776 | cpu_set_t set; | ||
| 777 | # endif /* __FreeBSD__ */ | ||
| 778 | # if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) | ||
| 779 | sched_getaffinity(0, sizeof(set), &set); | ||
| 780 | # else /* !JEMALLOC_HAVE_SCHED_SETAFFINITY */ | ||
| 781 | pthread_getaffinity_np(pthread_self(), sizeof(set), &set); | ||
| 782 | # endif /* JEMALLOC_HAVE_SCHED_SETAFFINITY */ | ||
| 783 | long cpu_affinity = CPU_COUNT(&set); | ||
| 784 | if (cpu_affinity != cpu_conf) { | ||
| 785 | return false; | ||
| 786 | } | ||
| 787 | # endif /* CPU_COUNT */ | ||
| 788 | return true; | ||
| 789 | #endif | ||
| 790 | } | ||
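/*
 * Annotation: the three counts can legitimately disagree, e.g. when CPUs
 * are offlined (ONLN < CONF) or the process is pinned to a cpuset (affinity
 * count < CONF). Detecting that here lets the percpu-arena setup notice the
 * mismatch instead of mapping CPUs onto missing arenas.
 */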
| 791 | |||
| 792 | static void | ||
| 793 | init_opt_stats_opts(const char *v, size_t vlen, char *dest) { | ||
| 794 | size_t opts_len = strlen(dest); | ||
| 795 | assert(opts_len <= stats_print_tot_num_options); | ||
| 796 | |||
| 797 | for (size_t i = 0; i < vlen; i++) { | ||
| 798 | switch (v[i]) { | ||
| 799 | #define OPTION(o, v, d, s) case o: break; | ||
| 800 | STATS_PRINT_OPTIONS | ||
| 801 | #undef OPTION | ||
| 802 | default: continue; | ||
| 803 | } | ||
| 804 | |||
| 805 | if (strchr(dest, v[i]) != NULL) { | ||
| 806 | /* Ignore repeated. */ | ||
| 807 | continue; | ||
| 808 | } | ||
| 809 | |||
| 810 | dest[opts_len++] = v[i]; | ||
| 811 | dest[opts_len] = '\0'; | ||
| 812 | assert(opts_len <= stats_print_tot_num_options); | ||
| 813 | } | ||
| 814 | assert(opts_len == strlen(dest)); | ||
| 815 | } | ||
| 816 | |||
| 817 | /* Reads the next size pair in a multi-sized option. */ | ||
| 818 | static bool | ||
| 819 | malloc_conf_multi_sizes_next(const char **slab_size_segment_cur, | ||
| 820 | size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *new_size) { | ||
| 821 | const char *cur = *slab_size_segment_cur; | ||
| 822 | char *end; | ||
| 823 | uintmax_t um; | ||
| 824 | |||
| 825 | set_errno(0); | ||
| 826 | |||
| 827 | /* First number, then '-' */ | ||
| 828 | um = malloc_strtoumax(cur, &end, 0); | ||
| 829 | if (get_errno() != 0 || *end != '-') { | ||
| 830 | return true; | ||
| 831 | } | ||
| 832 | *slab_start = (size_t)um; | ||
| 833 | cur = end + 1; | ||
| 834 | |||
| 835 | /* Second number, then ':' */ | ||
| 836 | um = malloc_strtoumax(cur, &end, 0); | ||
| 837 | if (get_errno() != 0 || *end != ':') { | ||
| 838 | return true; | ||
| 839 | } | ||
| 840 | *slab_end = (size_t)um; | ||
| 841 | cur = end + 1; | ||
| 842 | |||
| 843 | /* Last number */ | ||
| 844 | um = malloc_strtoumax(cur, &end, 0); | ||
| 845 | if (get_errno() != 0) { | ||
| 846 | return true; | ||
| 847 | } | ||
| 848 | *new_size = (size_t)um; | ||
| 849 | |||
| 850 | /* Consume the separator if there is one. */ | ||
| 851 | if (*end == '|') { | ||
| 852 | end++; | ||
| 853 | } | ||
| 854 | |||
| 855 | *vlen_left -= end - *slab_size_segment_cur; | ||
| 856 | *slab_size_segment_cur = end; | ||
| 857 | |||
| 858 | return false; | ||
| 859 | } | ||
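/*
 * Worked example of the grammar accepted above, "<start>-<end>:<value>"
 * with '|' between pairs: parsing "128-512:3|4096-8192:1" yields
 * (*slab_start, *slab_end, *new_size) = (128, 512, 3) on the first call and
 * (4096, 8192, 1) on the second, leaving *vlen_left == 0. A malformed pair
 * such as "128:512" makes the function return true (failure).
 */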
| 860 | |||
| 861 | static bool | ||
| 862 | malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, | ||
| 863 | char const **v_p, size_t *vlen_p) { | ||
| 864 | bool accept; | ||
| 865 | const char *opts = *opts_p; | ||
| 866 | |||
| 867 | *k_p = opts; | ||
| 868 | |||
| 869 | for (accept = false; !accept;) { | ||
| 870 | switch (*opts) { | ||
| 871 | case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': | ||
| 872 | case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': | ||
| 873 | case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': | ||
| 874 | case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': | ||
| 875 | case 'Y': case 'Z': | ||
| 876 | case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': | ||
| 877 | case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': | ||
| 878 | case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': | ||
| 879 | case 's': case 't': case 'u': case 'v': case 'w': case 'x': | ||
| 880 | case 'y': case 'z': | ||
| 881 | case '0': case '1': case '2': case '3': case '4': case '5': | ||
| 882 | case '6': case '7': case '8': case '9': | ||
| 883 | case '_': | ||
| 884 | opts++; | ||
| 885 | break; | ||
| 886 | case ':': | ||
| 887 | opts++; | ||
| 888 | *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; | ||
| 889 | *v_p = opts; | ||
| 890 | accept = true; | ||
| 891 | break; | ||
| 892 | case '\0': | ||
| 893 | if (opts != *opts_p) { | ||
| 894 | malloc_write("<jemalloc>: Conf string ends " | ||
| 895 | "with key\n"); | ||
| 896 | had_conf_error = true; | ||
| 897 | } | ||
| 898 | return true; | ||
| 899 | default: | ||
| 900 | malloc_write("<jemalloc>: Malformed conf string\n"); | ||
| 901 | had_conf_error = true; | ||
| 902 | return true; | ||
| 903 | } | ||
| 904 | } | ||
| 905 | |||
| 906 | for (accept = false; !accept;) { | ||
| 907 | switch (*opts) { | ||
| 908 | case ',': | ||
| 909 | opts++; | ||
| 910 | /* | ||
| 911 | * Look ahead one character here, because the next time | ||
| 912 | * this function is called, it will assume that end of | ||
| 913 | * input has been cleanly reached if no input remains, | ||
| 914 | * but we have optimistically already consumed the | ||
| 915 | * comma if one exists. | ||
| 916 | */ | ||
| 917 | if (*opts == '\0') { | ||
| 918 | malloc_write("<jemalloc>: Conf string ends " | ||
| 919 | "with comma\n"); | ||
| 920 | had_conf_error = true; | ||
| 921 | } | ||
| 922 | *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; | ||
| 923 | accept = true; | ||
| 924 | break; | ||
| 925 | case '\0': | ||
| 926 | *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; | ||
| 927 | accept = true; | ||
| 928 | break; | ||
| 929 | default: | ||
| 930 | opts++; | ||
| 931 | break; | ||
| 932 | } | ||
| 933 | } | ||
| 934 | |||
| 935 | *opts_p = opts; | ||
| 936 | return false; | ||
| 937 | } | ||
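/*
 * Worked example: over opts = "abort:false,narenas:3" the tokenizer yields
 *
 *   1st call: k = "abort",   klen = 5, v = "false", vlen = 5
 *   2nd call: k = "narenas", klen = 7, v = "3",     vlen = 1
 *
 * after which the driving loop sees *opts == '\0' and stops. Keys may only
 * contain [A-Za-z0-9_]; any other character before the ':' fails with
 * "Malformed conf string".
 */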
| 938 | |||
| 939 | static void | ||
| 940 | malloc_abort_invalid_conf(void) { | ||
| 941 | assert(opt_abort_conf); | ||
| 942 | malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf " | ||
| 943 | "value (see above).\n"); | ||
| 944 | abort(); | ||
| 945 | } | ||
| 946 | |||
| 947 | static void | ||
| 948 | malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, | ||
| 949 | size_t vlen) { | ||
| 950 | malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, | ||
| 951 | (int)vlen, v); | ||
| 952 | /* If abort_conf is set, error out after processing all options. */ | ||
| 953 | const char *experimental = "experimental_"; | ||
| 954 | if (strncmp(k, experimental, strlen(experimental)) == 0) { | ||
| 955 | /* However, tolerate experimental features. */ | ||
| 956 | return; | ||
| 957 | } | ||
| 958 | had_conf_error = true; | ||
| 959 | } | ||
| 960 | |||
| 961 | static void | ||
| 962 | malloc_slow_flag_init(void) { | ||
| 963 | /* | ||
| 964 | * Combine the runtime options into malloc_slow for fast path. Called | ||
| 965 | * after processing all the options. | ||
| 966 | */ | ||
| 967 | malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) | ||
| 968 | | (opt_junk_free ? flag_opt_junk_free : 0) | ||
| 969 | | (opt_zero ? flag_opt_zero : 0) | ||
| 970 | | (opt_utrace ? flag_opt_utrace : 0) | ||
| 971 | | (opt_xmalloc ? flag_opt_xmalloc : 0); | ||
| 972 | |||
| 973 | malloc_slow = (malloc_slow_flags != 0); | ||
| 974 | } | ||
| 975 | |||
| 976 | /* Number of sources for initializing malloc_conf */ | ||
| 977 | #define MALLOC_CONF_NSOURCES 5 | ||
| 978 | |||
| 979 | static const char * | ||
| 980 | obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) { | ||
| 981 | if (config_debug) { | ||
| 982 | static unsigned read_source = 0; | ||
| 983 | /* | ||
| 984 | * Each source should only be read once, to minimize # of | ||
| 985 | * syscalls on init. | ||
| 986 | */ | ||
| 987 | assert(read_source++ == which_source); | ||
| 988 | } | ||
| 989 | assert(which_source < MALLOC_CONF_NSOURCES); | ||
| 990 | |||
| 991 | const char *ret; | ||
| 992 | switch (which_source) { | ||
| 993 | case 0: | ||
| 994 | ret = config_malloc_conf; | ||
| 995 | break; | ||
| 996 | case 1: | ||
| 997 | if (je_malloc_conf != NULL) { | ||
| 998 | /* Use options that were compiled into the program. */ | ||
| 999 | ret = je_malloc_conf; | ||
| 1000 | } else { | ||
| 1001 | /* No configuration specified. */ | ||
| 1002 | ret = NULL; | ||
| 1003 | } | ||
| 1004 | break; | ||
| 1005 | case 2: { | ||
| 1006 | ssize_t linklen = 0; | ||
| 1007 | #ifndef _WIN32 | ||
| 1008 | int saved_errno = errno; | ||
| 1009 | const char *linkname = | ||
| 1010 | # ifdef JEMALLOC_PREFIX | ||
| 1011 | "/etc/"JEMALLOC_PREFIX"malloc.conf" | ||
| 1012 | # else | ||
| 1013 | "/etc/malloc.conf" | ||
| 1014 | # endif | ||
| 1015 | ; | ||
| 1016 | |||
| 1017 | /* | ||
| 1018 | * Try to use the contents of the "/etc/malloc.conf" symbolic | ||
| 1019 | * link's name. | ||
| 1020 | */ | ||
| 1021 | #ifndef JEMALLOC_READLINKAT | ||
| 1022 | linklen = readlink(linkname, buf, PATH_MAX); | ||
| 1023 | #else | ||
| 1024 | linklen = readlinkat(AT_FDCWD, linkname, buf, PATH_MAX); | ||
| 1025 | #endif | ||
| 1026 | if (linklen == -1) { | ||
| 1027 | /* No configuration specified. */ | ||
| 1028 | linklen = 0; | ||
| 1029 | /* Restore errno. */ | ||
| 1030 | set_errno(saved_errno); | ||
| 1031 | } | ||
| 1032 | #endif | ||
| 1033 | buf[linklen] = '\0'; | ||
| 1034 | ret = buf; | ||
| 1035 | break; | ||
| 1036 | } case 3: { | ||
| 1037 | const char *envname = | ||
| 1038 | #ifdef JEMALLOC_PREFIX | ||
| 1039 | JEMALLOC_CPREFIX"MALLOC_CONF" | ||
| 1040 | #else | ||
| 1041 | "MALLOC_CONF" | ||
| 1042 | #endif | ||
| 1043 | ; | ||
| 1044 | |||
| 1045 | if ((ret = jemalloc_secure_getenv(envname)) != NULL) { | ||
| 1046 | /* | ||
| 1047 | * Do nothing; opts is already initialized to the value | ||
| 1048 | * of the MALLOC_CONF environment variable. | ||
| 1049 | */ | ||
| 1050 | } else { | ||
| 1051 | /* No configuration specified. */ | ||
| 1052 | ret = NULL; | ||
| 1053 | } | ||
| 1054 | break; | ||
| 1055 | } case 4: { | ||
| 1056 | ret = je_malloc_conf_2_conf_harder; | ||
| 1057 | break; | ||
| 1058 | } default: | ||
| 1059 | not_reached(); | ||
| 1060 | ret = NULL; | ||
| 1061 | } | ||
| 1062 | return ret; | ||
| 1063 | } | ||
| 1064 | |||
| 1065 | static void | ||
| 1066 | malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], | ||
| 1067 | bool initial_call, const char *opts_cache[MALLOC_CONF_NSOURCES], | ||
| 1068 | char buf[PATH_MAX + 1]) { | ||
| 1069 | static const char *opts_explain[MALLOC_CONF_NSOURCES] = { | ||
| 1070 | "string specified via --with-malloc-conf", | ||
| 1071 | "string pointed to by the global variable malloc_conf", | ||
| 1072 | ("\"name\" of the file referenced by the symbolic link named " | ||
| 1073 | "/etc/malloc.conf"), | ||
| 1074 | "value of the environment variable MALLOC_CONF", | ||
| 1075 | ("string pointed to by the global variable " | ||
| 1076 | "malloc_conf_2_conf_harder"), | ||
| 1077 | }; | ||
| 1078 | unsigned i; | ||
| 1079 | const char *opts, *k, *v; | ||
| 1080 | size_t klen, vlen; | ||
| 1081 | |||
| 1082 | for (i = 0; i < MALLOC_CONF_NSOURCES; i++) { | ||
| 1083 | /* Get runtime configuration. */ | ||
| 1084 | if (initial_call) { | ||
| 1085 | opts_cache[i] = obtain_malloc_conf(i, buf); | ||
| 1086 | } | ||
| 1087 | opts = opts_cache[i]; | ||
| 1088 | if (!initial_call && opt_confirm_conf) { | ||
| 1089 | malloc_printf( | ||
| 1090 | "<jemalloc>: malloc_conf #%u (%s): \"%s\"\n", | ||
| 1091 | i + 1, opts_explain[i], opts != NULL ? opts : ""); | ||
| 1092 | } | ||
| 1093 | if (opts == NULL) { | ||
| 1094 | continue; | ||
| 1095 | } | ||
| 1096 | |||
| 1097 | while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, | ||
| 1098 | &vlen)) { | ||
| 1099 | |||
| 1100 | #define CONF_ERROR(msg, k, klen, v, vlen) \ | ||
| 1101 | if (!initial_call) { \ | ||
| 1102 | malloc_conf_error( \ | ||
| 1103 | msg, k, klen, v, vlen); \ | ||
| 1104 | cur_opt_valid = false; \ | ||
| 1105 | } | ||
| 1106 | #define CONF_CONTINUE { \ | ||
| 1107 | if (!initial_call && opt_confirm_conf \ | ||
| 1108 | && cur_opt_valid) { \ | ||
| 1109 | malloc_printf("<jemalloc>: -- " \ | ||
| 1110 | "Set conf value: %.*s:%.*s" \ | ||
| 1111 | "\n", (int)klen, k, \ | ||
| 1112 | (int)vlen, v); \ | ||
| 1113 | } \ | ||
| 1114 | continue; \ | ||
| 1115 | } | ||
| 1116 | #define CONF_MATCH(n) \ | ||
| 1117 | (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) | ||
| 1118 | #define CONF_MATCH_VALUE(n) \ | ||
| 1119 | (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) | ||
| 1120 | #define CONF_HANDLE_BOOL(o, n) \ | ||
| 1121 | if (CONF_MATCH(n)) { \ | ||
| 1122 | if (CONF_MATCH_VALUE("true")) { \ | ||
| 1123 | o = true; \ | ||
| 1124 | } else if (CONF_MATCH_VALUE("false")) { \ | ||
| 1125 | o = false; \ | ||
| 1126 | } else { \ | ||
| 1127 | CONF_ERROR("Invalid conf value",\ | ||
| 1128 | k, klen, v, vlen); \ | ||
| 1129 | } \ | ||
| 1130 | CONF_CONTINUE; \ | ||
| 1131 | } | ||
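/*
 * Annotation: each CONF_HANDLE_* macro both tests and consumes the current
 * key. Illustrative flow for the pair ("abort", "maybe") reaching
 * CONF_HANDLE_BOOL(opt_abort, "abort"): CONF_MATCH("abort") is true,
 * neither CONF_MATCH_VALUE branch fires, so CONF_ERROR logs
 * "<jemalloc>: Invalid conf value: abort:maybe" and CONF_CONTINUE advances
 * to the next key/value pair.
 */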
| 1132 | /* | ||
| 1133 | * One of the CONF_MIN macros below expands, in one of the use points, | ||
| 1134 | * to "unsigned integer < 0", which is always false, triggering the | ||
| 1135 | * GCC -Wtype-limits warning, which we disable here and re-enable below. | ||
| 1136 | */ | ||
| 1137 | JEMALLOC_DIAGNOSTIC_PUSH | ||
| 1138 | JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS | ||
| 1139 | |||
| 1140 | #define CONF_DONT_CHECK_MIN(um, min) false | ||
| 1141 | #define CONF_CHECK_MIN(um, min) ((um) < (min)) | ||
| 1142 | #define CONF_DONT_CHECK_MAX(um, max) false | ||
| 1143 | #define CONF_CHECK_MAX(um, max) ((um) > (max)) | ||
| 1144 | |||
| 1145 | #define CONF_VALUE_READ(max_t, result) \ | ||
| 1146 | char *end; \ | ||
| 1147 | set_errno(0); \ | ||
| 1148 | result = (max_t)malloc_strtoumax(v, &end, 0); | ||
| 1149 | #define CONF_VALUE_READ_FAIL() \ | ||
| 1150 | (get_errno() != 0 || (uintptr_t)end - (uintptr_t)v != vlen) | ||
| 1151 | |||
| 1152 | #define CONF_HANDLE_T(t, max_t, o, n, min, max, check_min, check_max, clip) \ | ||
| 1153 | if (CONF_MATCH(n)) { \ | ||
| 1154 | max_t mv; \ | ||
| 1155 | CONF_VALUE_READ(max_t, mv) \ | ||
| 1156 | if (CONF_VALUE_READ_FAIL()) { \ | ||
| 1157 | CONF_ERROR("Invalid conf value",\ | ||
| 1158 | k, klen, v, vlen); \ | ||
| 1159 | } else if (clip) { \ | ||
| 1160 | if (check_min(mv, (t)(min))) { \ | ||
| 1161 | o = (t)(min); \ | ||
| 1162 | } else if ( \ | ||
| 1163 | check_max(mv, (t)(max))) { \ | ||
| 1164 | o = (t)(max); \ | ||
| 1165 | } else { \ | ||
| 1166 | o = (t)mv; \ | ||
| 1167 | } \ | ||
| 1168 | } else { \ | ||
| 1169 | if (check_min(mv, (t)(min)) || \ | ||
| 1170 | check_max(mv, (t)(max))) { \ | ||
| 1171 | CONF_ERROR( \ | ||
| 1172 | "Out-of-range " \ | ||
| 1173 | "conf value", \ | ||
| 1174 | k, klen, v, vlen); \ | ||
| 1175 | } else { \ | ||
| 1176 | o = (t)mv; \ | ||
| 1177 | } \ | ||
| 1178 | } \ | ||
| 1179 | CONF_CONTINUE; \ | ||
| 1180 | } | ||
| 1181 | #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ | ||
| 1182 | CONF_HANDLE_T(t, uintmax_t, o, n, min, max, check_min, \ | ||
| 1183 | check_max, clip) | ||
| 1184 | #define CONF_HANDLE_T_SIGNED(t, o, n, min, max, check_min, check_max, clip)\ | ||
| 1185 | CONF_HANDLE_T(t, intmax_t, o, n, min, max, check_min, \ | ||
| 1186 | check_max, clip) | ||
| 1187 | |||
| 1188 | #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ | ||
| 1189 | clip) \ | ||
| 1190 | CONF_HANDLE_T_U(unsigned, o, n, min, max, \ | ||
| 1191 | check_min, check_max, clip) | ||
| 1192 | #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ | ||
| 1193 | CONF_HANDLE_T_U(size_t, o, n, min, max, \ | ||
| 1194 | check_min, check_max, clip) | ||
| 1195 | #define CONF_HANDLE_INT64_T(o, n, min, max, check_min, check_max, clip) \ | ||
| 1196 | CONF_HANDLE_T_SIGNED(int64_t, o, n, min, max, \ | ||
| 1197 | check_min, check_max, clip) | ||
| 1198 | #define CONF_HANDLE_UINT64_T(o, n, min, max, check_min, check_max, clip)\ | ||
| 1199 | CONF_HANDLE_T_U(uint64_t, o, n, min, max, \ | ||
| 1200 | check_min, check_max, clip) | ||
| 1201 | #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ | ||
| 1202 | CONF_HANDLE_T_SIGNED(ssize_t, o, n, min, max, \ | ||
| 1203 | CONF_CHECK_MIN, CONF_CHECK_MAX, false) | ||
| 1204 | #define CONF_HANDLE_CHAR_P(o, n, d) \ | ||
| 1205 | if (CONF_MATCH(n)) { \ | ||
| 1206 | size_t cpylen = (vlen <= \ | ||
| 1207 | sizeof(o)-1) ? vlen : \ | ||
| 1208 | sizeof(o)-1; \ | ||
| 1209 | strncpy(o, v, cpylen); \ | ||
| 1210 | o[cpylen] = '\0'; \ | ||
| 1211 | CONF_CONTINUE; \ | ||
| 1212 | } | ||
| 1213 | |||
| 1214 | bool cur_opt_valid = true; | ||
| 1215 | |||
| 1216 | CONF_HANDLE_BOOL(opt_confirm_conf, "confirm_conf") | ||
| 1217 | if (initial_call) { | ||
| 1218 | continue; | ||
| 1219 | } | ||
| 1220 | |||
| 1221 | CONF_HANDLE_BOOL(opt_abort, "abort") | ||
| 1222 | CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") | ||
| 1223 | CONF_HANDLE_BOOL(opt_trust_madvise, "trust_madvise") | ||
| 1224 | if (strncmp("metadata_thp", k, klen) == 0) { | ||
| 1225 | int m; | ||
| 1226 | bool match = false; | ||
| 1227 | for (m = 0; m < metadata_thp_mode_limit; m++) { | ||
| 1228 | if (strncmp(metadata_thp_mode_names[m], | ||
| 1229 | v, vlen) == 0) { | ||
| 1230 | opt_metadata_thp = m; | ||
| 1231 | match = true; | ||
| 1232 | break; | ||
| 1233 | } | ||
| 1234 | } | ||
| 1235 | if (!match) { | ||
| 1236 | CONF_ERROR("Invalid conf value", | ||
| 1237 | k, klen, v, vlen); | ||
| 1238 | } | ||
| 1239 | CONF_CONTINUE; | ||
| 1240 | } | ||
| 1241 | CONF_HANDLE_BOOL(opt_retain, "retain") | ||
| 1242 | if (strncmp("dss", k, klen) == 0) { | ||
| 1243 | int m; | ||
| 1244 | bool match = false; | ||
| 1245 | for (m = 0; m < dss_prec_limit; m++) { | ||
| 1246 | if (strncmp(dss_prec_names[m], v, vlen) | ||
| 1247 | == 0) { | ||
| 1248 | if (extent_dss_prec_set(m)) { | ||
| 1249 | CONF_ERROR( | ||
| 1250 | "Error setting dss", | ||
| 1251 | k, klen, v, vlen); | ||
| 1252 | } else { | ||
| 1253 | opt_dss = | ||
| 1254 | dss_prec_names[m]; | ||
| 1255 | match = true; | ||
| 1256 | break; | ||
| 1257 | } | ||
| 1258 | } | ||
| 1259 | } | ||
| 1260 | if (!match) { | ||
| 1261 | CONF_ERROR("Invalid conf value", | ||
| 1262 | k, klen, v, vlen); | ||
| 1263 | } | ||
| 1264 | CONF_CONTINUE; | ||
| 1265 | } | ||
| 1266 | if (CONF_MATCH("narenas")) { | ||
| 1267 | if (CONF_MATCH_VALUE("default")) { | ||
| 1268 | opt_narenas = 0; | ||
| 1269 | CONF_CONTINUE; | ||
| 1270 | } else { | ||
| 1271 | CONF_HANDLE_UNSIGNED(opt_narenas, | ||
| 1272 | "narenas", 1, UINT_MAX, | ||
| 1273 | CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, | ||
| 1274 | /* clip */ false) | ||
| 1275 | } | ||
| 1276 | } | ||
| 1277 | if (CONF_MATCH("narenas_ratio")) { | ||
| 1278 | char *end; | ||
| 1279 | bool err = fxp_parse(&opt_narenas_ratio, v, | ||
| 1280 | &end); | ||
| 1281 | if (err || (size_t)(end - v) != vlen) { | ||
| 1282 | CONF_ERROR("Invalid conf value", | ||
| 1283 | k, klen, v, vlen); | ||
| 1284 | } | ||
| 1285 | CONF_CONTINUE; | ||
| 1286 | } | ||
| 1287 | if (CONF_MATCH("bin_shards")) { | ||
| 1288 | const char *bin_shards_segment_cur = v; | ||
| 1289 | size_t vlen_left = vlen; | ||
| 1290 | do { | ||
| 1291 | size_t size_start; | ||
| 1292 | size_t size_end; | ||
| 1293 | size_t nshards; | ||
| 1294 | bool err = malloc_conf_multi_sizes_next( | ||
| 1295 | &bin_shards_segment_cur, &vlen_left, | ||
| 1296 | &size_start, &size_end, &nshards); | ||
| 1297 | if (err || bin_update_shard_size( | ||
| 1298 | bin_shard_sizes, size_start, | ||
| 1299 | size_end, nshards)) { | ||
| 1300 | CONF_ERROR( | ||
| 1301 | "Invalid settings for " | ||
| 1302 | "bin_shards", k, klen, v, | ||
| 1303 | vlen); | ||
| 1304 | break; | ||
| 1305 | } | ||
| 1306 | } while (vlen_left > 0); | ||
| 1307 | CONF_CONTINUE; | ||
| 1308 | } | ||
| 1309 | CONF_HANDLE_INT64_T(opt_mutex_max_spin, | ||
| 1310 | "mutex_max_spin", -1, INT64_MAX, CONF_CHECK_MIN, | ||
| 1311 | CONF_DONT_CHECK_MAX, false); | ||
| 1312 | CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, | ||
| 1313 | "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < | ||
| 1314 | QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : | ||
| 1315 | SSIZE_MAX); | ||
| 1316 | CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms, | ||
| 1317 | "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < | ||
| 1318 | QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : | ||
| 1319 | SSIZE_MAX); | ||
| 1320 | CONF_HANDLE_BOOL(opt_stats_print, "stats_print") | ||
| 1321 | if (CONF_MATCH("stats_print_opts")) { | ||
| 1322 | init_opt_stats_opts(v, vlen, | ||
| 1323 | opt_stats_print_opts); | ||
| 1324 | CONF_CONTINUE; | ||
| 1325 | } | ||
| 1326 | CONF_HANDLE_INT64_T(opt_stats_interval, | ||
| 1327 | "stats_interval", -1, INT64_MAX, | ||
| 1328 | CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, false) | ||
| 1329 | if (CONF_MATCH("stats_interval_opts")) { | ||
| 1330 | init_opt_stats_opts(v, vlen, | ||
| 1331 | opt_stats_interval_opts); | ||
| 1332 | CONF_CONTINUE; | ||
| 1333 | } | ||
| 1334 | if (config_fill) { | ||
| 1335 | if (CONF_MATCH("junk")) { | ||
| 1336 | if (CONF_MATCH_VALUE("true")) { | ||
| 1337 | opt_junk = "true"; | ||
| 1338 | opt_junk_alloc = opt_junk_free = | ||
| 1339 | true; | ||
| 1340 | } else if (CONF_MATCH_VALUE("false")) { | ||
| 1341 | opt_junk = "false"; | ||
| 1342 | opt_junk_alloc = opt_junk_free = | ||
| 1343 | false; | ||
| 1344 | } else if (CONF_MATCH_VALUE("alloc")) { | ||
| 1345 | opt_junk = "alloc"; | ||
| 1346 | opt_junk_alloc = true; | ||
| 1347 | opt_junk_free = false; | ||
| 1348 | } else if (CONF_MATCH_VALUE("free")) { | ||
| 1349 | opt_junk = "free"; | ||
| 1350 | opt_junk_alloc = false; | ||
| 1351 | opt_junk_free = true; | ||
| 1352 | } else { | ||
| 1353 | CONF_ERROR( | ||
| 1354 | "Invalid conf value", | ||
| 1355 | k, klen, v, vlen); | ||
| 1356 | } | ||
| 1357 | CONF_CONTINUE; | ||
| 1358 | } | ||
| 1359 | CONF_HANDLE_BOOL(opt_zero, "zero") | ||
| 1360 | } | ||
| 1361 | if (config_utrace) { | ||
| 1362 | CONF_HANDLE_BOOL(opt_utrace, "utrace") | ||
| 1363 | } | ||
| 1364 | if (config_xmalloc) { | ||
| 1365 | CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") | ||
| 1366 | } | ||
| 1367 | if (config_enable_cxx) { | ||
| 1368 | CONF_HANDLE_BOOL( | ||
| 1369 | opt_experimental_infallible_new, | ||
| 1370 | "experimental_infallible_new") | ||
| 1371 | } | ||
| 1372 | |||
| 1373 | CONF_HANDLE_BOOL(opt_tcache, "tcache") | ||
| 1374 | CONF_HANDLE_SIZE_T(opt_tcache_max, "tcache_max", | ||
| 1375 | 0, TCACHE_MAXCLASS_LIMIT, CONF_DONT_CHECK_MIN, | ||
| 1376 | CONF_CHECK_MAX, /* clip */ true) | ||
| 1377 | if (CONF_MATCH("lg_tcache_max")) { | ||
| 1378 | size_t m; | ||
| 1379 | CONF_VALUE_READ(size_t, m) | ||
| 1380 | if (CONF_VALUE_READ_FAIL()) { | ||
| 1381 | CONF_ERROR("Invalid conf value", | ||
| 1382 | k, klen, v, vlen); | ||
| 1383 | } else { | ||
| 1384 | /* clip if necessary */ | ||
| 1385 | if (m > TCACHE_LG_MAXCLASS_LIMIT) { | ||
| 1386 | m = TCACHE_LG_MAXCLASS_LIMIT; | ||
| 1387 | } | ||
| 1388 | opt_tcache_max = (size_t)1 << m; | ||
| 1389 | } | ||
| 1390 | CONF_CONTINUE; | ||
| 1391 | } | ||
| 1392 | /* | ||
| 1393 | * Anyone trying to set a value outside -16 to 16 is | ||
| 1394 | * deeply confused. | ||
| 1395 | */ | ||
| 1396 | CONF_HANDLE_SSIZE_T(opt_lg_tcache_nslots_mul, | ||
| 1397 | "lg_tcache_nslots_mul", -16, 16) | ||
| 1398 | /* Ditto with values past 2048. */ | ||
| 1399 | CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_min, | ||
| 1400 | "tcache_nslots_small_min", 1, 2048, | ||
| 1401 | CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) | ||
| 1402 | CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_max, | ||
| 1403 | "tcache_nslots_small_max", 1, 2048, | ||
| 1404 | CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) | ||
| 1405 | CONF_HANDLE_UNSIGNED(opt_tcache_nslots_large, | ||
| 1406 | "tcache_nslots_large", 1, 2048, | ||
| 1407 | CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) | ||
| 1408 | CONF_HANDLE_SIZE_T(opt_tcache_gc_incr_bytes, | ||
| 1409 | "tcache_gc_incr_bytes", 1024, SIZE_T_MAX, | ||
| 1410 | CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, | ||
| 1411 | /* clip */ true) | ||
| 1412 | CONF_HANDLE_SIZE_T(opt_tcache_gc_delay_bytes, | ||
| 1413 | "tcache_gc_delay_bytes", 0, SIZE_T_MAX, | ||
| 1414 | CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, | ||
| 1415 | /* clip */ false) | ||
| 1416 | CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_small_div, | ||
| 1417 | "lg_tcache_flush_small_div", 1, 16, | ||
| 1418 | CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) | ||
| 1419 | CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_large_div, | ||
| 1420 | "lg_tcache_flush_large_div", 1, 16, | ||
| 1421 | CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) | ||
| 1422 | |||
| 1423 | /* | ||
| 1424 | * The runtime option of oversize_threshold remains | ||
| 1425 | * undocumented. It may be tweaked in the next major | ||
| 1426 | * release (6.0). The default value 8M is rather | ||
| 1427 | * conservative / safe. Tuning it further down may | ||
| 1428 | * improve fragmentation a bit more, but may also cause | ||
| 1429 | * contention on the huge arena. | ||
| 1430 | */ | ||
| 1431 | CONF_HANDLE_SIZE_T(opt_oversize_threshold, | ||
| 1432 | "oversize_threshold", 0, SC_LARGE_MAXCLASS, | ||
| 1433 | CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, false) | ||
| 1434 | CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit, | ||
| 1435 | "lg_extent_max_active_fit", 0, | ||
| 1436 | (sizeof(size_t) << 3), CONF_DONT_CHECK_MIN, | ||
| 1437 | CONF_CHECK_MAX, false) | ||
| 1438 | |||
| 1439 | if (strncmp("percpu_arena", k, klen) == 0) { | ||
| 1440 | bool match = false; | ||
| 1441 | for (int m = percpu_arena_mode_names_base; m < | ||
| 1442 | percpu_arena_mode_names_limit; m++) { | ||
| 1443 | if (strncmp(percpu_arena_mode_names[m], | ||
| 1444 | v, vlen) == 0) { | ||
| 1445 | if (!have_percpu_arena) { | ||
| 1446 | CONF_ERROR( | ||
| 1447 | "No getcpu support", | ||
| 1448 | k, klen, v, vlen); | ||
| 1449 | } | ||
| 1450 | opt_percpu_arena = m; | ||
| 1451 | match = true; | ||
| 1452 | break; | ||
| 1453 | } | ||
| 1454 | } | ||
| 1455 | if (!match) { | ||
| 1456 | CONF_ERROR("Invalid conf value", | ||
| 1457 | k, klen, v, vlen); | ||
| 1458 | } | ||
| 1459 | CONF_CONTINUE; | ||
| 1460 | } | ||
| 1461 | CONF_HANDLE_BOOL(opt_background_thread, | ||
| 1462 | "background_thread"); | ||
| 1463 | CONF_HANDLE_SIZE_T(opt_max_background_threads, | ||
| 1464 | "max_background_threads", 1, | ||
| 1465 | opt_max_background_threads, | ||
| 1466 | CONF_CHECK_MIN, CONF_CHECK_MAX, | ||
| 1467 | true); | ||
| 1468 | CONF_HANDLE_BOOL(opt_hpa, "hpa") | ||
| 1469 | CONF_HANDLE_SIZE_T(opt_hpa_opts.slab_max_alloc, | ||
| 1470 | "hpa_slab_max_alloc", PAGE, HUGEPAGE, | ||
| 1471 | CONF_CHECK_MIN, CONF_CHECK_MAX, true); | ||
| 1472 | |||
| 1473 | /* | ||
| 1474 | * Accept either a ratio-based or an exact hugification | ||
| 1475 | * threshold. | ||
| 1476 | */ | ||
| 1477 | CONF_HANDLE_SIZE_T(opt_hpa_opts.hugification_threshold, | ||
| 1478 | "hpa_hugification_threshold", PAGE, HUGEPAGE, | ||
| 1479 | CONF_CHECK_MIN, CONF_CHECK_MAX, true); | ||
| 1480 | if (CONF_MATCH("hpa_hugification_threshold_ratio")) { | ||
| 1481 | fxp_t ratio; | ||
| 1482 | char *end; | ||
| 1483 | bool err = fxp_parse(&ratio, v, | ||
| 1484 | &end); | ||
| 1485 | if (err || (size_t)(end - v) != vlen | ||
| 1486 | || ratio > FXP_INIT_INT(1)) { | ||
| 1487 | CONF_ERROR("Invalid conf value", | ||
| 1488 | k, klen, v, vlen); | ||
| 1489 | } else { | ||
| 1490 | opt_hpa_opts.hugification_threshold = | ||
| 1491 | fxp_mul_frac(HUGEPAGE, ratio); | ||
| 1492 | } | ||
| 1493 | CONF_CONTINUE; | ||
| 1494 | } | ||
| 1495 | |||
| 1496 | CONF_HANDLE_UINT64_T( | ||
| 1497 | opt_hpa_opts.hugify_delay_ms, "hpa_hugify_delay_ms", | ||
| 1498 | 0, 0, CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, | ||
| 1499 | false); | ||
| 1500 | |||
| 1501 | CONF_HANDLE_UINT64_T( | ||
| 1502 | opt_hpa_opts.min_purge_interval_ms, | ||
| 1503 | "hpa_min_purge_interval_ms", 0, 0, | ||
| 1504 | CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false); | ||
| 1505 | |||
| 1506 | if (CONF_MATCH("hpa_dirty_mult")) { | ||
| 1507 | if (CONF_MATCH_VALUE("-1")) { | ||
| 1508 | opt_hpa_opts.dirty_mult = (fxp_t)-1; | ||
| 1509 | CONF_CONTINUE; | ||
| 1510 | } | ||
| 1511 | fxp_t ratio; | ||
| 1512 | char *end; | ||
| 1513 | bool err = fxp_parse(&ratio, v, | ||
| 1514 | &end); | ||
| 1515 | if (err || (size_t)(end - v) != vlen) { | ||
| 1516 | CONF_ERROR("Invalid conf value", | ||
| 1517 | k, klen, v, vlen); | ||
| 1518 | } else { | ||
| 1519 | opt_hpa_opts.dirty_mult = ratio; | ||
| 1520 | } | ||
| 1521 | CONF_CONTINUE; | ||
| 1522 | } | ||
| 1523 | |||
| 1524 | CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.nshards, | ||
| 1525 | "hpa_sec_nshards", 0, 0, CONF_CHECK_MIN, | ||
| 1526 | CONF_DONT_CHECK_MAX, true); | ||
| 1527 | CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_alloc, | ||
| 1528 | "hpa_sec_max_alloc", PAGE, 0, CONF_CHECK_MIN, | ||
| 1529 | CONF_DONT_CHECK_MAX, true); | ||
| 1530 | CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_bytes, | ||
| 1531 | "hpa_sec_max_bytes", PAGE, 0, CONF_CHECK_MIN, | ||
| 1532 | CONF_DONT_CHECK_MAX, true); | ||
| 1533 | CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.bytes_after_flush, | ||
| 1534 | "hpa_sec_bytes_after_flush", PAGE, 0, | ||
| 1535 | CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true); | ||
| 1536 | CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.batch_fill_extra, | ||
| 1537 | "hpa_sec_batch_fill_extra", 0, HUGEPAGE_PAGES, | ||
| 1538 | CONF_CHECK_MIN, CONF_CHECK_MAX, true); | ||
| 1539 | |||
| 1540 | if (CONF_MATCH("slab_sizes")) { | ||
| 1541 | if (CONF_MATCH_VALUE("default")) { | ||
| 1542 | sc_data_init(sc_data); | ||
| 1543 | CONF_CONTINUE; | ||
| 1544 | } | ||
| 1545 | bool err; | ||
| 1546 | const char *slab_size_segment_cur = v; | ||
| 1547 | size_t vlen_left = vlen; | ||
| 1548 | do { | ||
| 1549 | size_t slab_start; | ||
| 1550 | size_t slab_end; | ||
| 1551 | size_t pgs; | ||
| 1552 | err = malloc_conf_multi_sizes_next( | ||
| 1553 | &slab_size_segment_cur, | ||
| 1554 | &vlen_left, &slab_start, &slab_end, | ||
| 1555 | &pgs); | ||
| 1556 | if (!err) { | ||
| 1557 | sc_data_update_slab_size( | ||
| 1558 | sc_data, slab_start, | ||
| 1559 | slab_end, (int)pgs); | ||
| 1560 | } else { | ||
| 1561 | CONF_ERROR("Invalid settings " | ||
| 1562 | "for slab_sizes", | ||
| 1563 | k, klen, v, vlen); | ||
| 1564 | } | ||
| 1565 | } while (!err && vlen_left > 0); | ||
| 1566 | CONF_CONTINUE; | ||
| 1567 | } | ||
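/*
 * Editor's note (illustrative, not part of the original source): each
 * segment consumed by malloc_conf_multi_sizes_next() has the form
 * "<start>-<end>:<pages>", with segments separated by '|'. For example,
 *
 *     MALLOC_CONF="slab_sizes:1-4:1|129-512:3"
 *
 * requests 1 page per slab for the size classes spanning 1-4 bytes and 3
 * pages per slab for those spanning 129-512 bytes, while
 * "slab_sizes:default" restores the built-in geometry via sc_data_init().
 */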
| 1568 | if (config_prof) { | ||
| 1569 | CONF_HANDLE_BOOL(opt_prof, "prof") | ||
| 1570 | CONF_HANDLE_CHAR_P(opt_prof_prefix, | ||
| 1571 | "prof_prefix", "jeprof") | ||
| 1572 | CONF_HANDLE_BOOL(opt_prof_active, "prof_active") | ||
| 1573 | CONF_HANDLE_BOOL(opt_prof_thread_active_init, | ||
| 1574 | "prof_thread_active_init") | ||
| 1575 | CONF_HANDLE_SIZE_T(opt_lg_prof_sample, | ||
| 1576 | "lg_prof_sample", 0, (sizeof(uint64_t) << 3) | ||
| 1577 | - 1, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, | ||
| 1578 | true) | ||
| 1579 | CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") | ||
| 1580 | CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, | ||
| 1581 | "lg_prof_interval", -1, | ||
| 1582 | (sizeof(uint64_t) << 3) - 1) | ||
| 1583 | CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") | ||
| 1584 | CONF_HANDLE_BOOL(opt_prof_final, "prof_final") | ||
| 1585 | CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") | ||
| 1586 | CONF_HANDLE_BOOL(opt_prof_leak_error, | ||
| 1587 | "prof_leak_error") | ||
| 1588 | CONF_HANDLE_BOOL(opt_prof_log, "prof_log") | ||
| 1589 | CONF_HANDLE_SSIZE_T(opt_prof_recent_alloc_max, | ||
| 1590 | "prof_recent_alloc_max", -1, SSIZE_MAX) | ||
| 1591 | CONF_HANDLE_BOOL(opt_prof_stats, "prof_stats") | ||
| 1592 | CONF_HANDLE_BOOL(opt_prof_sys_thread_name, | ||
| 1593 | "prof_sys_thread_name") | ||
| 1594 | if (CONF_MATCH("prof_time_resolution")) { | ||
| 1595 | if (CONF_MATCH_VALUE("default")) { | ||
| 1596 | opt_prof_time_res = | ||
| 1597 | prof_time_res_default; | ||
| 1598 | } else if (CONF_MATCH_VALUE("high")) { | ||
| 1599 | if (!config_high_res_timer) { | ||
| 1600 | CONF_ERROR( | ||
| 1601 | "No high resolution" | ||
| 1602 | " timer support", | ||
| 1603 | k, klen, v, vlen); | ||
| 1604 | } else { | ||
| 1605 | opt_prof_time_res = | ||
| 1606 | prof_time_res_high; | ||
| 1607 | } | ||
| 1608 | } else { | ||
| 1609 | CONF_ERROR("Invalid conf value", | ||
| 1610 | k, klen, v, vlen); | ||
| 1611 | } | ||
| 1612 | CONF_CONTINUE; | ||
| 1613 | } | ||
| 1614 | /* | ||
| 1615 | * Undocumented. When set to false, don't | ||
| 1616 | * correct for an unbiasing bug in jeprof | ||
| 1617 | * attribution. This can be handy if you want | ||
| 1618 | * to get consistent numbers from your binary | ||
| 1619 | * across different jemalloc versions, even if | ||
| 1620 | * those numbers are incorrect. The default is | ||
| 1621 | * true. | ||
| 1622 | */ | ||
| 1623 | CONF_HANDLE_BOOL(opt_prof_unbias, "prof_unbias") | ||
| 1624 | } | ||
| 1625 | if (config_log) { | ||
| 1626 | if (CONF_MATCH("log")) { | ||
| 1627 | size_t cpylen = ( | ||
| 1628 | vlen < sizeof(log_var_names) ? /* keep room for the NUL below */ | ||
| 1629 | vlen : sizeof(log_var_names) - 1); | ||
| 1630 | strncpy(log_var_names, v, cpylen); | ||
| 1631 | log_var_names[cpylen] = '\0'; | ||
| 1632 | CONF_CONTINUE; | ||
| 1633 | } | ||
| 1634 | } | ||
| 1635 | if (CONF_MATCH("thp")) { | ||
| 1636 | bool match = false; | ||
| 1637 | for (int m = 0; m < thp_mode_names_limit; m++) { | ||
| 1638 | if (strncmp(thp_mode_names[m], v, vlen) | ||
| 1639 | == 0) { | ||
| 1640 | if (!have_madvise_huge && !have_memcntl) { | ||
| 1641 | CONF_ERROR( | ||
| 1642 | "No THP support", | ||
| 1643 | k, klen, v, vlen); | ||
| 1644 | } | ||
| 1645 | opt_thp = m; | ||
| 1646 | match = true; | ||
| 1647 | break; | ||
| 1648 | } | ||
| 1649 | } | ||
| 1650 | if (!match) { | ||
| 1651 | CONF_ERROR("Invalid conf value", | ||
| 1652 | k, klen, v, vlen); | ||
| 1653 | } | ||
| 1654 | CONF_CONTINUE; | ||
| 1655 | } | ||
| 1656 | if (CONF_MATCH("zero_realloc")) { | ||
| 1657 | if (CONF_MATCH_VALUE("alloc")) { | ||
| 1658 | opt_zero_realloc_action | ||
| 1659 | = zero_realloc_action_alloc; | ||
| 1660 | } else if (CONF_MATCH_VALUE("free")) { | ||
| 1661 | opt_zero_realloc_action | ||
| 1662 | = zero_realloc_action_free; | ||
| 1663 | } else if (CONF_MATCH_VALUE("abort")) { | ||
| 1664 | opt_zero_realloc_action | ||
| 1665 | = zero_realloc_action_abort; | ||
| 1666 | } else { | ||
| 1667 | CONF_ERROR("Invalid conf value", | ||
| 1668 | k, klen, v, vlen); | ||
| 1669 | } | ||
| 1670 | CONF_CONTINUE; | ||
| 1671 | } | ||
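/*
 * Editor's note (illustrative, not part of the original source): this
 * parses the documented opt.zero_realloc setting, e.g.
 *
 *     MALLOC_CONF="zero_realloc:free"
 *
 * "alloc" makes realloc(ptr, 0) allocate a minimal-size object, "free"
 * makes it deallocate ptr and return NULL, and "abort" terminates the
 * process on any zero-size realloc.
 */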
| 1672 | if (config_uaf_detection && | ||
| 1673 | CONF_MATCH("lg_san_uaf_align")) { | ||
| 1674 | ssize_t a; | ||
| 1675 | CONF_VALUE_READ(ssize_t, a) | ||
| 1676 | if (CONF_VALUE_READ_FAIL() || a < -1) { | ||
| 1677 | CONF_ERROR("Invalid conf value", | ||
| 1678 | k, klen, v, vlen); | ||
| 1679 | } | ||
| 1680 | if (a == -1) { | ||
| 1681 | opt_lg_san_uaf_align = -1; | ||
| 1682 | CONF_CONTINUE; | ||
| 1683 | } | ||
| 1684 | |||
| 1685 | /* clip if necessary */ | ||
| 1686 | ssize_t max_allowed = (sizeof(size_t) << 3) - 1; | ||
| 1687 | ssize_t min_allowed = LG_PAGE; | ||
| 1688 | if (a > max_allowed) { | ||
| 1689 | a = max_allowed; | ||
| 1690 | } else if (a < min_allowed) { | ||
| 1691 | a = min_allowed; | ||
| 1692 | } | ||
| 1693 | |||
| 1694 | opt_lg_san_uaf_align = a; | ||
| 1695 | CONF_CONTINUE; | ||
| 1696 | } | ||
| 1697 | |||
| 1698 | CONF_HANDLE_SIZE_T(opt_san_guard_small, | ||
| 1699 | "san_guard_small", 0, SIZE_T_MAX, | ||
| 1700 | CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false) | ||
| 1701 | CONF_HANDLE_SIZE_T(opt_san_guard_large, | ||
| 1702 | "san_guard_large", 0, SIZE_T_MAX, | ||
| 1703 | CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false) | ||
| 1704 | |||
| 1705 | CONF_ERROR("Invalid conf pair", k, klen, v, vlen); | ||
| 1706 | #undef CONF_ERROR | ||
| 1707 | #undef CONF_CONTINUE | ||
| 1708 | #undef CONF_MATCH | ||
| 1709 | #undef CONF_MATCH_VALUE | ||
| 1710 | #undef CONF_HANDLE_BOOL | ||
| 1711 | #undef CONF_DONT_CHECK_MIN | ||
| 1712 | #undef CONF_CHECK_MIN | ||
| 1713 | #undef CONF_DONT_CHECK_MAX | ||
| 1714 | #undef CONF_CHECK_MAX | ||
| 1715 | #undef CONF_HANDLE_T | ||
| 1716 | #undef CONF_HANDLE_T_U | ||
| 1717 | #undef CONF_HANDLE_T_SIGNED | ||
| 1718 | #undef CONF_HANDLE_UNSIGNED | ||
| 1719 | #undef CONF_HANDLE_SIZE_T | ||
| 1720 | #undef CONF_HANDLE_SSIZE_T | ||
| 1721 | #undef CONF_HANDLE_CHAR_P | ||
| 1722 | /* Re-enable diagnostic "-Wtype-limits" */ | ||
| 1723 | JEMALLOC_DIAGNOSTIC_POP | ||
| 1724 | } | ||
| 1725 | if (opt_abort_conf && had_conf_error) { | ||
| 1726 | malloc_abort_invalid_conf(); | ||
| 1727 | } | ||
| 1728 | } | ||
| 1729 | atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); | ||
| 1730 | } | ||
| 1731 | |||
| 1732 | static bool | ||
| 1733 | malloc_conf_init_check_deps(void) { | ||
| 1734 | if (opt_prof_leak_error && !opt_prof_final) { | ||
| 1735 | malloc_printf("<jemalloc>: prof_leak_error is set w/o " | ||
| 1736 | "prof_final.\n"); | ||
| 1737 | return true; | ||
| 1738 | } | ||
| 1739 | |||
| 1740 | return false; | ||
| 1741 | } | ||
| 1742 | |||
| 1743 | static void | ||
| 1744 | malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { | ||
| 1745 | const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL, | ||
| 1746 | NULL}; | ||
| 1747 | char buf[PATH_MAX + 1]; | ||
| 1748 | |||
| 1749 | /* The first call only sets the confirm_conf option and fills opts_cache. */ | ||
| 1750 | malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf); | ||
| 1751 | malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache, | ||
| 1752 | NULL); | ||
| 1753 | if (malloc_conf_init_check_deps()) { | ||
| 1754 | /* check_deps only emits a warning; abort below if needed. */ | ||
| 1755 | if (opt_abort_conf) { | ||
| 1756 | malloc_abort_invalid_conf(); | ||
| 1757 | } | ||
| 1758 | } | ||
| 1759 | } | ||
| 1760 | |||
| 1761 | #undef MALLOC_CONF_NSOURCES | ||
| 1762 | |||
| 1763 | static bool | ||
| 1764 | malloc_init_hard_needed(void) { | ||
| 1765 | if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == | ||
| 1766 | malloc_init_recursible)) { | ||
| 1767 | /* | ||
| 1768 | * Another thread initialized the allocator before this one | ||
| 1769 | * acquired init_lock, or this thread is the initializing | ||
| 1770 | * thread, and it is recursively allocating. | ||
| 1771 | */ | ||
| 1772 | return false; | ||
| 1773 | } | ||
| 1774 | #ifdef JEMALLOC_THREADED_INIT | ||
| 1775 | if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { | ||
| 1776 | /* Busy-wait until the initializing thread completes. */ | ||
| 1777 | spin_t spinner = SPIN_INITIALIZER; | ||
| 1778 | do { | ||
| 1779 | malloc_mutex_unlock(TSDN_NULL, &init_lock); | ||
| 1780 | spin_adaptive(&spinner); | ||
| 1781 | malloc_mutex_lock(TSDN_NULL, &init_lock); | ||
| 1782 | } while (!malloc_initialized()); | ||
| 1783 | return false; | ||
| 1784 | } | ||
| 1785 | #endif | ||
| 1786 | return true; | ||
| 1787 | } | ||
| 1788 | |||
| 1789 | static bool | ||
| 1790 | malloc_init_hard_a0_locked() { | ||
| 1791 | malloc_initializer = INITIALIZER; | ||
| 1792 | |||
| 1793 | JEMALLOC_DIAGNOSTIC_PUSH | ||
| 1794 | JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS | ||
| 1795 | sc_data_t sc_data = {0}; | ||
| 1796 | JEMALLOC_DIAGNOSTIC_POP | ||
| 1797 | |||
| 1798 | /* | ||
| 1799 | * Ordering here is somewhat tricky; we need sc_boot() first, since that | ||
| 1800 | * determines what the size classes will be, and then | ||
| 1801 | * malloc_conf_init(), since any slab size tweaking will need to be done | ||
| 1802 | * before sz_boot and bin_info_boot, which assume that the values they | ||
| 1803 | * read out of sc_data_global are final. | ||
| 1804 | */ | ||
| 1805 | sc_boot(&sc_data); | ||
| 1806 | unsigned bin_shard_sizes[SC_NBINS]; | ||
| 1807 | bin_shard_sizes_boot(bin_shard_sizes); | ||
| 1808 | /* | ||
| 1809 | * prof_boot0 only initializes opt_prof_prefix. We need to do it before | ||
| 1810 | * we parse malloc_conf options, in case malloc_conf parsing overwrites | ||
| 1811 | * it. | ||
| 1812 | */ | ||
| 1813 | if (config_prof) { | ||
| 1814 | prof_boot0(); | ||
| 1815 | } | ||
| 1816 | malloc_conf_init(&sc_data, bin_shard_sizes); | ||
| 1817 | san_init(opt_lg_san_uaf_align); | ||
| 1818 | sz_boot(&sc_data, opt_cache_oblivious); | ||
| 1819 | bin_info_boot(&sc_data, bin_shard_sizes); | ||
| 1820 | |||
| 1821 | if (opt_stats_print) { | ||
| 1822 | /* Print statistics at exit. */ | ||
| 1823 | if (atexit(stats_print_atexit) != 0) { | ||
| 1824 | malloc_write("<jemalloc>: Error in atexit()\n"); | ||
| 1825 | if (opt_abort) { | ||
| 1826 | abort(); | ||
| 1827 | } | ||
| 1828 | } | ||
| 1829 | } | ||
| 1830 | |||
| 1831 | if (stats_boot()) { | ||
| 1832 | return true; | ||
| 1833 | } | ||
| 1834 | if (pages_boot()) { | ||
| 1835 | return true; | ||
| 1836 | } | ||
| 1837 | if (base_boot(TSDN_NULL)) { | ||
| 1838 | return true; | ||
| 1839 | } | ||
| 1840 | /* emap_global is static, hence zeroed. */ | ||
| 1841 | if (emap_init(&arena_emap_global, b0get(), /* zeroed */ true)) { | ||
| 1842 | return true; | ||
| 1843 | } | ||
| 1844 | if (extent_boot()) { | ||
| 1845 | return true; | ||
| 1846 | } | ||
| 1847 | if (ctl_boot()) { | ||
| 1848 | return true; | ||
| 1849 | } | ||
| 1850 | if (config_prof) { | ||
| 1851 | prof_boot1(); | ||
| 1852 | } | ||
| 1853 | if (opt_hpa && !hpa_supported()) { | ||
| 1854 | malloc_printf("<jemalloc>: HPA not supported in the current " | ||
| 1855 | "configuration; %s.", | ||
| 1856 | opt_abort_conf ? "aborting" : "disabling"); | ||
| 1857 | if (opt_abort_conf) { | ||
| 1858 | malloc_abort_invalid_conf(); | ||
| 1859 | } else { | ||
| 1860 | opt_hpa = false; | ||
| 1861 | } | ||
| 1862 | } | ||
| 1863 | if (arena_boot(&sc_data, b0get(), opt_hpa)) { | ||
| 1864 | return true; | ||
| 1865 | } | ||
| 1866 | if (tcache_boot(TSDN_NULL, b0get())) { | ||
| 1867 | return true; | ||
| 1868 | } | ||
| 1869 | if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, | ||
| 1870 | malloc_mutex_rank_exclusive)) { | ||
| 1871 | return true; | ||
| 1872 | } | ||
| 1873 | hook_boot(); | ||
| 1874 | /* | ||
| 1875 | * Create enough scaffolding to allow recursive allocation in | ||
| 1876 | * malloc_ncpus(). | ||
| 1877 | */ | ||
| 1878 | narenas_auto = 1; | ||
| 1879 | manual_arena_base = narenas_auto + 1; | ||
| 1880 | memset(arenas, 0, sizeof(arena_t *) * narenas_auto); | ||
| 1881 | /* | ||
| 1882 | * Initialize one arena here. The rest are lazily created in | ||
| 1883 | * arena_choose_hard(). | ||
| 1884 | */ | ||
| 1885 | if (arena_init(TSDN_NULL, 0, &arena_config_default) == NULL) { | ||
| 1886 | return true; | ||
| 1887 | } | ||
| 1888 | a0 = arena_get(TSDN_NULL, 0, false); | ||
| 1889 | |||
| 1890 | if (opt_hpa && !hpa_supported()) { | ||
| 1891 | malloc_printf("<jemalloc>: HPA not supported in the current " | ||
| 1892 | "configuration; %s.", | ||
| 1893 | opt_abort_conf ? "aborting" : "disabling"); | ||
| 1894 | if (opt_abort_conf) { | ||
| 1895 | malloc_abort_invalid_conf(); | ||
| 1896 | } else { | ||
| 1897 | opt_hpa = false; | ||
| 1898 | } | ||
| 1899 | } else if (opt_hpa) { | ||
| 1900 | hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts; | ||
| 1901 | hpa_shard_opts.deferral_allowed = background_thread_enabled(); | ||
| 1902 | if (pa_shard_enable_hpa(TSDN_NULL, &a0->pa_shard, | ||
| 1903 | &hpa_shard_opts, &opt_hpa_sec_opts)) { | ||
| 1904 | return true; | ||
| 1905 | } | ||
| 1906 | } | ||
| 1907 | |||
| 1908 | malloc_init_state = malloc_init_a0_initialized; | ||
| 1909 | |||
| 1910 | return false; | ||
| 1911 | } | ||
| 1912 | |||
| 1913 | static bool | ||
| 1914 | malloc_init_hard_a0(void) { | ||
| 1915 | bool ret; | ||
| 1916 | |||
| 1917 | malloc_mutex_lock(TSDN_NULL, &init_lock); | ||
| 1918 | ret = malloc_init_hard_a0_locked(); | ||
| 1919 | malloc_mutex_unlock(TSDN_NULL, &init_lock); | ||
| 1920 | return ret; | ||
| 1921 | } | ||
| 1922 | |||
| 1923 | /* Initialize data structures which may trigger recursive allocation. */ | ||
| 1924 | static bool | ||
| 1925 | malloc_init_hard_recursible(void) { | ||
| 1926 | malloc_init_state = malloc_init_recursible; | ||
| 1927 | |||
| 1928 | ncpus = malloc_ncpus(); | ||
| 1929 | if (opt_percpu_arena != percpu_arena_disabled) { | ||
| 1930 | bool cpu_count_is_deterministic = | ||
| 1931 | malloc_cpu_count_is_deterministic(); | ||
| 1932 | if (!cpu_count_is_deterministic) { | ||
| 1933 | /* | ||
| 1934 | * If the number of CPUs is not deterministic and narenas | ||
| 1935 | * was not specified, disable the per-CPU arena, since CPU | ||
| 1936 | * IDs may not be detected properly. | ||
| 1937 | */ | ||
| 1938 | if (opt_narenas == 0) { | ||
| 1939 | opt_percpu_arena = percpu_arena_disabled; | ||
| 1940 | malloc_write("<jemalloc>: Number of CPUs " | ||
| 1941 | "detected is not deterministic. Per-CPU " | ||
| 1942 | "arena disabled.\n"); | ||
| 1943 | if (opt_abort_conf) { | ||
| 1944 | malloc_abort_invalid_conf(); | ||
| 1945 | } | ||
| 1946 | if (opt_abort) { | ||
| 1947 | abort(); | ||
| 1948 | } | ||
| 1949 | } | ||
| 1950 | } | ||
| 1951 | } | ||
| 1952 | |||
| 1953 | #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ | ||
| 1954 | && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ | ||
| 1955 | !defined(__native_client__)) | ||
| 1956 | /* LinuxThreads' pthread_atfork() allocates. */ | ||
| 1957 | if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, | ||
| 1958 | jemalloc_postfork_child) != 0) { | ||
| 1959 | malloc_write("<jemalloc>: Error in pthread_atfork()\n"); | ||
| 1960 | if (opt_abort) { | ||
| 1961 | abort(); | ||
| 1962 | } | ||
| 1963 | return true; | ||
| 1964 | } | ||
| 1965 | #endif | ||
| 1966 | |||
| 1967 | if (background_thread_boot0()) { | ||
| 1968 | return true; | ||
| 1969 | } | ||
| 1970 | |||
| 1971 | return false; | ||
| 1972 | } | ||
| 1973 | |||
| 1974 | static unsigned | ||
| 1975 | malloc_narenas_default(void) { | ||
| 1976 | assert(ncpus > 0); | ||
| 1977 | /* | ||
| 1978 | * For SMP systems, create more than one arena per CPU by | ||
| 1979 | * default. | ||
| 1980 | */ | ||
| 1981 | if (ncpus > 1) { | ||
| 1982 | fxp_t fxp_ncpus = FXP_INIT_INT(ncpus); | ||
| 1983 | fxp_t goal = fxp_mul(fxp_ncpus, opt_narenas_ratio); | ||
| 1984 | uint32_t int_goal = fxp_round_nearest(goal); | ||
| 1985 | if (int_goal == 0) { | ||
| 1986 | return 1; | ||
| 1987 | } | ||
| 1988 | return int_goal; | ||
| 1989 | } else { | ||
| 1990 | return 1; | ||
| 1991 | } | ||
| 1992 | } | ||
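/*
 * Editor's sketch (illustrative, not part of the original source): fxp_t is
 * jemalloc's 32-bit 16.16 fixed-point type, so the goal computation above
 * is plain integer arithmetic. Assuming the default narenas ratio of 4:
 *
 *     ncpus     = 16
 *     fxp_ncpus = 16 << 16
 *     goal      = fxp_mul(16.0, 4.0) = 64.0 in 16.16
 *     int_goal  = fxp_round_nearest(goal) = 64 arenas
 *
 * The int_goal == 0 guard matters for small fractional ratios, e.g. a 0.1
 * ratio on 2 CPUs yields 0.2, which rounds to 0 and must be bumped to 1.
 */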
| 1993 | |||
| 1994 | static percpu_arena_mode_t | ||
| 1995 | percpu_arena_as_initialized(percpu_arena_mode_t mode) { | ||
| 1996 | assert(!malloc_initialized()); | ||
| 1997 | assert(mode <= percpu_arena_disabled); | ||
| 1998 | |||
| 1999 | if (mode != percpu_arena_disabled) { | ||
| 2000 | mode += percpu_arena_mode_enabled_base; | ||
| 2001 | } | ||
| 2002 | |||
| 2003 | return mode; | ||
| 2004 | } | ||
| 2005 | |||
| 2006 | static bool | ||
| 2007 | malloc_init_narenas(void) { | ||
| 2008 | assert(ncpus > 0); | ||
| 2009 | |||
| 2010 | if (opt_percpu_arena != percpu_arena_disabled) { | ||
| 2011 | if (!have_percpu_arena || malloc_getcpu() < 0) { | ||
| 2012 | opt_percpu_arena = percpu_arena_disabled; | ||
| 2013 | malloc_printf("<jemalloc>: perCPU arena getcpu() not " | ||
| 2014 | "available. Setting narenas to %u.\n", opt_narenas ? | ||
| 2015 | opt_narenas : malloc_narenas_default()); | ||
| 2016 | if (opt_abort) { | ||
| 2017 | abort(); | ||
| 2018 | } | ||
| 2019 | } else { | ||
| 2020 | if (ncpus >= MALLOCX_ARENA_LIMIT) { | ||
| 2021 | malloc_printf("<jemalloc>: narenas w/ percpu" | ||
| 2022 | "arena beyond limit (%d)\n", ncpus); | ||
| 2023 | if (opt_abort) { | ||
| 2024 | abort(); | ||
| 2025 | } | ||
| 2026 | return true; | ||
| 2027 | } | ||
| 2028 | /* NB: opt_percpu_arena isn't fully initialized yet. */ | ||
| 2029 | if (percpu_arena_as_initialized(opt_percpu_arena) == | ||
| 2030 | per_phycpu_arena && ncpus % 2 != 0) { | ||
| 2031 | malloc_printf("<jemalloc>: invalid " | ||
| 2032 | "configuration -- per physical CPU arena " | ||
| 2033 | "with odd number (%u) of CPUs (no hyper " | ||
| 2034 | "threading?).\n", ncpus); | ||
| 2035 | if (opt_abort) | ||
| 2036 | abort(); | ||
| 2037 | } | ||
| 2038 | unsigned n = percpu_arena_ind_limit( | ||
| 2039 | percpu_arena_as_initialized(opt_percpu_arena)); | ||
| 2040 | if (opt_narenas < n) { | ||
| 2041 | /* | ||
| 2042 | * If narenas is specified with percpu_arena | ||
| 2043 | * enabled, the actual narenas is set to the | ||
| 2044 | * greater of the two. percpu_arena_choose is | ||
| 2045 | * then free to use any of the arenas based on | ||
| 2046 | * CPU id. This is conservative (at a small | ||
| 2047 | * cost) but ensures correctness. | ||
| 2048 | * | ||
| 2049 | * If for some reason the ncpus determined at | ||
| 2050 | * boot is not the actual number (e.g. because | ||
| 2051 | * of affinity setting from numactl), reserving | ||
| 2052 | * narenas this way provides a workaround for | ||
| 2053 | * percpu_arena. | ||
| 2054 | */ | ||
| 2055 | opt_narenas = n; | ||
| 2056 | } | ||
| 2057 | } | ||
| 2058 | } | ||
| 2059 | if (opt_narenas == 0) { | ||
| 2060 | opt_narenas = malloc_narenas_default(); | ||
| 2061 | } | ||
| 2062 | assert(opt_narenas > 0); | ||
| 2063 | |||
| 2064 | narenas_auto = opt_narenas; | ||
| 2065 | /* | ||
| 2066 | * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). | ||
| 2067 | */ | ||
| 2068 | if (narenas_auto >= MALLOCX_ARENA_LIMIT) { | ||
| 2069 | narenas_auto = MALLOCX_ARENA_LIMIT - 1; | ||
| 2070 | malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", | ||
| 2071 | narenas_auto); | ||
| 2072 | } | ||
| 2073 | narenas_total_set(narenas_auto); | ||
| 2074 | if (arena_init_huge()) { | ||
| 2075 | narenas_total_inc(); | ||
| 2076 | } | ||
| 2077 | manual_arena_base = narenas_total_get(); | ||
| 2078 | |||
| 2079 | return false; | ||
| 2080 | } | ||
| 2081 | |||
| 2082 | static void | ||
| 2083 | malloc_init_percpu(void) { | ||
| 2084 | opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); | ||
| 2085 | } | ||
| 2086 | |||
| 2087 | static bool | ||
| 2088 | malloc_init_hard_finish(void) { | ||
| 2089 | if (malloc_mutex_boot()) { | ||
| 2090 | return true; | ||
| 2091 | } | ||
| 2092 | |||
| 2093 | malloc_init_state = malloc_init_initialized; | ||
| 2094 | malloc_slow_flag_init(); | ||
| 2095 | |||
| 2096 | return false; | ||
| 2097 | } | ||
| 2098 | |||
| 2099 | static void | ||
| 2100 | malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) { | ||
| 2101 | malloc_mutex_assert_owner(tsdn, &init_lock); | ||
| 2102 | malloc_mutex_unlock(tsdn, &init_lock); | ||
| 2103 | if (reentrancy_set) { | ||
| 2104 | assert(!tsdn_null(tsdn)); | ||
| 2105 | tsd_t *tsd = tsdn_tsd(tsdn); | ||
| 2106 | assert(tsd_reentrancy_level_get(tsd) > 0); | ||
| 2107 | post_reentrancy(tsd); | ||
| 2108 | } | ||
| 2109 | } | ||
| 2110 | |||
| 2111 | static bool | ||
| 2112 | malloc_init_hard(void) { | ||
| 2113 | tsd_t *tsd; | ||
| 2114 | |||
| 2115 | #if defined(_WIN32) && _WIN32_WINNT < 0x0600 | ||
| 2116 | _init_init_lock(); | ||
| 2117 | #endif | ||
| 2118 | malloc_mutex_lock(TSDN_NULL, &init_lock); | ||
| 2119 | |||
| 2120 | #define UNLOCK_RETURN(tsdn, ret, reentrancy) \ | ||
| 2121 | malloc_init_hard_cleanup(tsdn, reentrancy); \ | ||
| 2122 | return ret; | ||
| 2123 | |||
| 2124 | if (!malloc_init_hard_needed()) { | ||
| 2125 | UNLOCK_RETURN(TSDN_NULL, false, false) | ||
| 2126 | } | ||
| 2127 | |||
| 2128 | if (malloc_init_state != malloc_init_a0_initialized && | ||
| 2129 | malloc_init_hard_a0_locked()) { | ||
| 2130 | UNLOCK_RETURN(TSDN_NULL, true, false) | ||
| 2131 | } | ||
| 2132 | |||
| 2133 | malloc_mutex_unlock(TSDN_NULL, &init_lock); | ||
| 2134 | /* Recursive allocation relies on functional tsd. */ | ||
| 2135 | tsd = malloc_tsd_boot0(); | ||
| 2136 | if (tsd == NULL) { | ||
| 2137 | return true; | ||
| 2138 | } | ||
| 2139 | if (malloc_init_hard_recursible()) { | ||
| 2140 | return true; | ||
| 2141 | } | ||
| 2142 | |||
| 2143 | malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); | ||
| 2144 | /* Set reentrancy level to 1 during init. */ | ||
| 2145 | pre_reentrancy(tsd, NULL); | ||
| 2146 | /* Initialize narenas before prof_boot2 (for allocation). */ | ||
| 2147 | if (malloc_init_narenas() | ||
| 2148 | || background_thread_boot1(tsd_tsdn(tsd), b0get())) { | ||
| 2149 | UNLOCK_RETURN(tsd_tsdn(tsd), true, true) | ||
| 2150 | } | ||
| 2151 | if (config_prof && prof_boot2(tsd, b0get())) { | ||
| 2152 | UNLOCK_RETURN(tsd_tsdn(tsd), true, true) | ||
| 2153 | } | ||
| 2154 | |||
| 2155 | malloc_init_percpu(); | ||
| 2156 | |||
| 2157 | if (malloc_init_hard_finish()) { | ||
| 2158 | UNLOCK_RETURN(tsd_tsdn(tsd), true, true) | ||
| 2159 | } | ||
| 2160 | post_reentrancy(tsd); | ||
| 2161 | malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); | ||
| 2162 | |||
| 2163 | witness_assert_lockless(witness_tsd_tsdn( | ||
| 2164 | tsd_witness_tsdp_get_unsafe(tsd))); | ||
| 2165 | malloc_tsd_boot1(); | ||
| 2166 | /* Update TSD after tsd_boot1. */ | ||
| 2167 | tsd = tsd_fetch(); | ||
| 2168 | if (opt_background_thread) { | ||
| 2169 | assert(have_background_thread); | ||
| 2170 | /* | ||
| 2171 | * Need to finish init & unlock first before creating background | ||
| 2172 | * threads (pthread_create depends on malloc). ctl_init (which | ||
| 2173 | * sets isthreaded) needs to be called without holding any lock. | ||
| 2174 | */ | ||
| 2175 | background_thread_ctl_init(tsd_tsdn(tsd)); | ||
| 2176 | if (background_thread_create(tsd, 0)) { | ||
| 2177 | return true; | ||
| 2178 | } | ||
| 2179 | } | ||
| 2180 | #undef UNLOCK_RETURN | ||
| 2181 | return false; | ||
| 2182 | } | ||
| 2183 | |||
| 2184 | /* | ||
| 2185 | * End initialization functions. | ||
| 2186 | */ | ||
| 2187 | /******************************************************************************/ | ||
| 2188 | /* | ||
| 2189 | * Begin allocation-path internal functions and data structures. | ||
| 2190 | */ | ||
| 2191 | |||
| 2192 | /* | ||
| 2193 | * Settings determined by the documented behavior of the allocation functions. | ||
| 2194 | */ | ||
| 2195 | typedef struct static_opts_s static_opts_t; | ||
| 2196 | struct static_opts_s { | ||
| 2197 | /* Whether or not allocation size may overflow. */ | ||
| 2198 | bool may_overflow; | ||
| 2199 | |||
| 2200 | /* | ||
| 2201 | * Whether or not allocations (with alignment) of size 0 should be | ||
| 2202 | * treated as size 1. | ||
| 2203 | */ | ||
| 2204 | bool bump_empty_aligned_alloc; | ||
| 2205 | /* | ||
| 2206 | * Whether to assert that allocations are not of size 0 (after any | ||
| 2207 | * bumping). | ||
| 2208 | */ | ||
| 2209 | bool assert_nonempty_alloc; | ||
| 2210 | |||
| 2211 | /* | ||
| 2212 | * Whether or not to modify the 'result' argument to malloc in case of | ||
| 2213 | * error. | ||
| 2214 | */ | ||
| 2215 | bool null_out_result_on_error; | ||
| 2216 | /* Whether to set errno when we encounter an error condition. */ | ||
| 2217 | bool set_errno_on_error; | ||
| 2218 | |||
| 2219 | /* | ||
| 2220 | * The minimum valid alignment for functions requesting aligned storage. | ||
| 2221 | */ | ||
| 2222 | size_t min_alignment; | ||
| 2223 | |||
| 2224 | /* The error string to use if we oom. */ | ||
| 2225 | const char *oom_string; | ||
| 2226 | /* The error string to use if the passed-in alignment is invalid. */ | ||
| 2227 | const char *invalid_alignment_string; | ||
| 2228 | |||
| 2229 | /* | ||
| 2230 | * False if we're configured to skip some time-consuming operations. | ||
| 2231 | * | ||
| 2232 | * This isn't really a malloc "behavior", but it acts as a useful | ||
| 2233 | * summary of several other static (or at least, static after program | ||
| 2234 | * initialization) options. | ||
| 2235 | */ | ||
| 2236 | bool slow; | ||
| 2237 | /* | ||
| 2238 | * Whether to also return the usable size. | ||
| 2239 | */ | ||
| 2240 | bool usize; | ||
| 2241 | }; | ||
| 2242 | |||
| 2243 | JEMALLOC_ALWAYS_INLINE void | ||
| 2244 | static_opts_init(static_opts_t *static_opts) { | ||
| 2245 | static_opts->may_overflow = false; | ||
| 2246 | static_opts->bump_empty_aligned_alloc = false; | ||
| 2247 | static_opts->assert_nonempty_alloc = false; | ||
| 2248 | static_opts->null_out_result_on_error = false; | ||
| 2249 | static_opts->set_errno_on_error = false; | ||
| 2250 | static_opts->min_alignment = 0; | ||
| 2251 | static_opts->oom_string = ""; | ||
| 2252 | static_opts->invalid_alignment_string = ""; | ||
| 2253 | static_opts->slow = false; | ||
| 2254 | static_opts->usize = false; | ||
| 2255 | } | ||
| 2256 | |||
| 2257 | /* | ||
| 2258 | * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we | ||
| 2259 | * should have one constant here per magic value there. Note however that the | ||
| 2260 | * representations need not be related. | ||
| 2261 | */ | ||
| 2262 | #define TCACHE_IND_NONE ((unsigned)-1) | ||
| 2263 | #define TCACHE_IND_AUTOMATIC ((unsigned)-2) | ||
| 2264 | #define ARENA_IND_AUTOMATIC ((unsigned)-1) | ||
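/*
 * Editor's sketch (illustrative, not part of the original source): the
 * public counterparts of these internal sentinels are the MALLOCX_* flag
 * macros of the non-standard API, which pick a tcache or arena per call:
 */
#if 0 /* illustrative only; requires <jemalloc/jemalloc.h> */
void *p = mallocx(64, MALLOCX_TCACHE_NONE); /* bypass the thread cache */
void *q = mallocx(128, MALLOCX_ARENA(3)); /* use the arena at index 3 */
#endif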
| 2265 | |||
| 2266 | typedef struct dynamic_opts_s dynamic_opts_t; | ||
| 2267 | struct dynamic_opts_s { | ||
| 2268 | void **result; | ||
| 2269 | size_t usize; | ||
| 2270 | size_t num_items; | ||
| 2271 | size_t item_size; | ||
| 2272 | size_t alignment; | ||
| 2273 | bool zero; | ||
| 2274 | unsigned tcache_ind; | ||
| 2275 | unsigned arena_ind; | ||
| 2276 | }; | ||
| 2277 | |||
| 2278 | JEMALLOC_ALWAYS_INLINE void | ||
| 2279 | dynamic_opts_init(dynamic_opts_t *dynamic_opts) { | ||
| 2280 | dynamic_opts->result = NULL; | ||
| 2281 | dynamic_opts->usize = 0; | ||
| 2282 | dynamic_opts->num_items = 0; | ||
| 2283 | dynamic_opts->item_size = 0; | ||
| 2284 | dynamic_opts->alignment = 0; | ||
| 2285 | dynamic_opts->zero = false; | ||
| 2286 | dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC; | ||
| 2287 | dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; | ||
| 2288 | } | ||
| 2289 | |||
| 2290 | /* | ||
| 2291 | * The ind parameter is optional, and is only checked and filled if | ||
| 2292 | * alignment == 0; returns true if the resulting usize is out of range. | ||
| 2293 | */ | ||
| 2294 | JEMALLOC_ALWAYS_INLINE bool | ||
| 2295 | aligned_usize_get(size_t size, size_t alignment, size_t *usize, szind_t *ind, | ||
| 2296 | bool bump_empty_aligned_alloc) { | ||
| 2297 | assert(usize != NULL); | ||
| 2298 | if (alignment == 0) { | ||
| 2299 | if (ind != NULL) { | ||
| 2300 | *ind = sz_size2index(size); | ||
| 2301 | if (unlikely(*ind >= SC_NSIZES)) { | ||
| 2302 | return true; | ||
| 2303 | } | ||
| 2304 | *usize = sz_index2size(*ind); | ||
| 2305 | assert(*usize > 0 && *usize <= SC_LARGE_MAXCLASS); | ||
| 2306 | return false; | ||
| 2307 | } | ||
| 2308 | *usize = sz_s2u(size); | ||
| 2309 | } else { | ||
| 2310 | if (bump_empty_aligned_alloc && unlikely(size == 0)) { | ||
| 2311 | size = 1; | ||
| 2312 | } | ||
| 2313 | *usize = sz_sa2u(size, alignment); | ||
| 2314 | } | ||
| 2315 | if (unlikely(*usize == 0 || *usize > SC_LARGE_MAXCLASS)) { | ||
| 2316 | return true; | ||
| 2317 | } | ||
| 2318 | return false; | ||
| 2319 | } | ||
| 2320 | |||
| 2321 | JEMALLOC_ALWAYS_INLINE bool | ||
| 2322 | zero_get(bool guarantee, bool slow) { | ||
| 2323 | if (config_fill && slow && unlikely(opt_zero)) { | ||
| 2324 | return true; | ||
| 2325 | } else { | ||
| 2326 | return guarantee; | ||
| 2327 | } | ||
| 2328 | } | ||
| 2329 | |||
| 2330 | JEMALLOC_ALWAYS_INLINE tcache_t * | ||
| 2331 | tcache_get_from_ind(tsd_t *tsd, unsigned tcache_ind, bool slow, bool is_alloc) { | ||
| 2332 | tcache_t *tcache; | ||
| 2333 | if (tcache_ind == TCACHE_IND_AUTOMATIC) { | ||
| 2334 | if (likely(!slow)) { | ||
| 2335 | /* Get the tcache ptr unconditionally. */ | ||
| 2336 | tcache = tsd_tcachep_get(tsd); | ||
| 2337 | assert(tcache == tcache_get(tsd)); | ||
| 2338 | } else if (is_alloc || | ||
| 2339 | likely(tsd_reentrancy_level_get(tsd) == 0)) { | ||
| 2340 | tcache = tcache_get(tsd); | ||
| 2341 | } else { | ||
| 2342 | tcache = NULL; | ||
| 2343 | } | ||
| 2344 | } else { | ||
| 2345 | /* | ||
| 2346 | * Should not specify tcache on deallocation path when being | ||
| 2347 | * reentrant. | ||
| 2348 | */ | ||
| 2349 | assert(is_alloc || tsd_reentrancy_level_get(tsd) == 0 || | ||
| 2350 | tsd_state_nocleanup(tsd)); | ||
| 2351 | if (tcache_ind == TCACHE_IND_NONE) { | ||
| 2352 | tcache = NULL; | ||
| 2353 | } else { | ||
| 2354 | tcache = tcaches_get(tsd, tcache_ind); | ||
| 2355 | } | ||
| 2356 | } | ||
| 2357 | return tcache; | ||
| 2358 | } | ||
| 2359 | |||
| 2360 | /* Return true if a manual arena is specified and arena_get() OOMs. */ | ||
| 2361 | JEMALLOC_ALWAYS_INLINE bool | ||
| 2362 | arena_get_from_ind(tsd_t *tsd, unsigned arena_ind, arena_t **arena_p) { | ||
| 2363 | if (arena_ind == ARENA_IND_AUTOMATIC) { | ||
| 2364 | /* | ||
| 2365 | * In case of automatic arena management, we defer arena | ||
| 2366 | * computation until as late as we can, hoping to fill the | ||
| 2367 | * allocation out of the tcache. | ||
| 2368 | */ | ||
| 2369 | *arena_p = NULL; | ||
| 2370 | } else { | ||
| 2371 | *arena_p = arena_get(tsd_tsdn(tsd), arena_ind, true); | ||
| 2372 | if (unlikely(*arena_p == NULL) && arena_ind >= narenas_auto) { | ||
| 2373 | return true; | ||
| 2374 | } | ||
| 2375 | } | ||
| 2376 | return false; | ||
| 2377 | } | ||
| 2378 | |||
| 2379 | /* ind is ignored if dopts->alignment > 0. */ | ||
| 2380 | JEMALLOC_ALWAYS_INLINE void * | ||
| 2381 | imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, | ||
| 2382 | size_t size, size_t usize, szind_t ind) { | ||
| 2383 | /* Fill in the tcache. */ | ||
| 2384 | tcache_t *tcache = tcache_get_from_ind(tsd, dopts->tcache_ind, | ||
| 2385 | sopts->slow, /* is_alloc */ true); | ||
| 2386 | |||
| 2387 | /* Fill in the arena. */ | ||
| 2388 | arena_t *arena; | ||
| 2389 | if (arena_get_from_ind(tsd, dopts->arena_ind, &arena)) { | ||
| 2390 | return NULL; | ||
| 2391 | } | ||
| 2392 | |||
| 2393 | if (unlikely(dopts->alignment != 0)) { | ||
| 2394 | return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment, | ||
| 2395 | dopts->zero, tcache, arena); | ||
| 2396 | } | ||
| 2397 | |||
| 2398 | return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false, | ||
| 2399 | arena, sopts->slow); | ||
| 2400 | } | ||
| 2401 | |||
| 2402 | JEMALLOC_ALWAYS_INLINE void * | ||
| 2403 | imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, | ||
| 2404 | size_t usize, szind_t ind) { | ||
| 2405 | void *ret; | ||
| 2406 | |||
| 2407 | /* | ||
| 2408 | * For small allocations, sampling bumps the usize. If so, we allocate | ||
| 2409 | * from the ind_large bucket. | ||
| 2410 | */ | ||
| 2411 | szind_t ind_large; | ||
| 2412 | size_t bumped_usize = usize; | ||
| 2413 | |||
| 2414 | dopts->alignment = prof_sample_align(dopts->alignment); | ||
| 2415 | if (usize <= SC_SMALL_MAXCLASS) { | ||
| 2416 | assert(((dopts->alignment == 0) ? | ||
| 2417 | sz_s2u(SC_LARGE_MINCLASS) : | ||
| 2418 | sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment)) | ||
| 2419 | == SC_LARGE_MINCLASS); | ||
| 2420 | ind_large = sz_size2index(SC_LARGE_MINCLASS); | ||
| 2421 | bumped_usize = sz_s2u(SC_LARGE_MINCLASS); | ||
| 2422 | ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, | ||
| 2423 | bumped_usize, ind_large); | ||
| 2424 | if (unlikely(ret == NULL)) { | ||
| 2425 | return NULL; | ||
| 2426 | } | ||
| 2427 | arena_prof_promote(tsd_tsdn(tsd), ret, usize); | ||
| 2428 | } else { | ||
| 2429 | ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); | ||
| 2430 | } | ||
| 2431 | assert(prof_sample_aligned(ret)); | ||
| 2432 | |||
| 2433 | return ret; | ||
| 2434 | } | ||
| 2435 | |||
| 2436 | /* | ||
| 2437 | * Returns true if the allocation will overflow, and false otherwise. Sets | ||
| 2438 | * *size to the product either way. | ||
| 2439 | */ | ||
| 2440 | JEMALLOC_ALWAYS_INLINE bool | ||
| 2441 | compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, | ||
| 2442 | size_t *size) { | ||
| 2443 | /* | ||
| 2444 | * This function is just num_items * item_size, except that we may have | ||
| 2445 | * to check for overflow. | ||
| 2446 | */ | ||
| 2447 | |||
| 2448 | if (!may_overflow) { | ||
| 2449 | assert(dopts->num_items == 1); | ||
| 2450 | *size = dopts->item_size; | ||
| 2451 | return false; | ||
| 2452 | } | ||
| 2453 | |||
| 2454 | /* A size_t with its high-half bits all set to 1. */ | ||
| 2455 | static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2); | ||
| 2456 | |||
| 2457 | *size = dopts->item_size * dopts->num_items; | ||
| 2458 | |||
| 2459 | if (unlikely(*size == 0)) { | ||
| 2460 | return (dopts->num_items != 0 && dopts->item_size != 0); | ||
| 2461 | } | ||
| 2462 | |||
| 2463 | /* | ||
| 2464 | * We got a non-zero size, but we don't know if we overflowed to get | ||
| 2465 | * there. To avoid having to do a divide, we'll be clever and note that | ||
| 2466 | * if both A and B can be represented in N/2 bits, then their product | ||
| 2467 | * can be represented in N bits (without the possibility of overflow). | ||
| 2468 | */ | ||
| 2469 | if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) { | ||
| 2470 | return false; | ||
| 2471 | } | ||
| 2472 | if (likely(*size / dopts->item_size == dopts->num_items)) { | ||
| 2473 | return false; | ||
| 2474 | } | ||
| 2475 | return true; | ||
| 2476 | } | ||
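/*
 * Editor's sketch (illustrative, not part of the original source): on a
 * 64-bit target high_bits == 0xffffffff00000000, and the fast acceptance
 * holds because two factors below 2^32 have a product below 2^64, which
 * cannot wrap. A standalone rendering of the same check, under those
 * assumptions:
 */
#if 0 /* illustrative only */
#include <stdbool.h>
#include <stdint.h>
static bool
mul_overflows(size_t a, size_t b, size_t *prod) {
	const size_t high = SIZE_MAX << (sizeof(size_t) * 8 / 2);
	*prod = a * b;
	if (*prod == 0) {
		/*
		 * A zero product only indicates overflow if both factors
		 * were nonzero (i.e. the multiplication wrapped to 0).
		 */
		return a != 0 && b != 0;
	}
	if ((high & (a | b)) == 0) {
		return false; /* Both factors fit in a half word. */
	}
	return *prod / b != a; /* One division as the slow, exact check. */
}
#endif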
| 2477 | |||
| 2478 | JEMALLOC_ALWAYS_INLINE int | ||
| 2479 | imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { | ||
| 2480 | /* Where the actual allocated memory will live. */ | ||
| 2481 | void *allocation = NULL; | ||
| 2482 | /* Filled in by compute_size_with_overflow below. */ | ||
| 2483 | size_t size = 0; | ||
| 2484 | /* | ||
| 2485 | * The zero initialization for ind is actually a dead store, in that its | ||
| 2486 | * value is reset before any branch on it is taken. Sometimes, though, | ||
| 2487 | * it's convenient to pass it as an argument before this point. To avoid | ||
| 2488 | * undefined behavior then, we initialize it with this dummy store. | ||
| 2489 | */ | ||
| 2490 | szind_t ind = 0; | ||
| 2491 | /* usize will always be properly initialized. */ | ||
| 2492 | size_t usize; | ||
| 2493 | |||
| 2494 | /* Reentrancy is only checked on slow path. */ | ||
| 2495 | int8_t reentrancy_level; | ||
| 2496 | |||
| 2497 | /* Compute the amount of memory the user wants. */ | ||
| 2498 | if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, | ||
| 2499 | &size))) { | ||
| 2500 | goto label_oom; | ||
| 2501 | } | ||
| 2502 | |||
| 2503 | if (unlikely(dopts->alignment < sopts->min_alignment | ||
| 2504 | || (dopts->alignment & (dopts->alignment - 1)) != 0)) { | ||
| 2505 | goto label_invalid_alignment; | ||
| 2506 | } | ||
| 2507 | |||
| 2508 | /* This is the beginning of the "core" algorithm. */ | ||
| 2509 | dopts->zero = zero_get(dopts->zero, sopts->slow); | ||
| 2510 | if (aligned_usize_get(size, dopts->alignment, &usize, &ind, | ||
| 2511 | sopts->bump_empty_aligned_alloc)) { | ||
| 2512 | goto label_oom; | ||
| 2513 | } | ||
| 2514 | dopts->usize = usize; | ||
| 2515 | /* Validate the user input. */ | ||
| 2516 | if (sopts->assert_nonempty_alloc) { | ||
| 2517 | assert(size != 0); | ||
| 2518 | } | ||
| 2519 | |||
| 2520 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 2521 | |||
| 2522 | /* | ||
| 2523 | * If we need to handle reentrancy, we can do it out of a | ||
| 2524 | * known-initialized arena (i.e. arena 0). | ||
| 2525 | */ | ||
| 2526 | reentrancy_level = tsd_reentrancy_level_get(tsd); | ||
| 2527 | if (sopts->slow && unlikely(reentrancy_level > 0)) { | ||
| 2528 | /* | ||
| 2529 | * We should never specify particular arenas or tcaches from | ||
| 2530 | * within our internal allocations. | ||
| 2531 | */ | ||
| 2532 | assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || | ||
| 2533 | dopts->tcache_ind == TCACHE_IND_NONE); | ||
| 2534 | assert(dopts->arena_ind == ARENA_IND_AUTOMATIC); | ||
| 2535 | dopts->tcache_ind = TCACHE_IND_NONE; | ||
| 2536 | /* We know that arena 0 has already been initialized. */ | ||
| 2537 | dopts->arena_ind = 0; | ||
| 2538 | } | ||
| 2539 | |||
| 2540 | /* | ||
| 2541 | * If dopts->alignment > 0, then ind is still 0, but usize was computed | ||
| 2542 | * in the previous if statement. Down the positive alignment path, | ||
| 2543 | * imalloc_no_sample and imalloc_sample will ignore ind. | ||
| 2544 | */ | ||
| 2545 | |||
| 2546 | /* If profiling is on, get our profiling context. */ | ||
| 2547 | if (config_prof && opt_prof) { | ||
| 2548 | bool prof_active = prof_active_get_unlocked(); | ||
| 2549 | bool sample_event = te_prof_sample_event_lookahead(tsd, usize); | ||
| 2550 | prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, | ||
| 2551 | sample_event); | ||
| 2552 | |||
| 2553 | emap_alloc_ctx_t alloc_ctx; | ||
| 2554 | if (likely((uintptr_t)tctx == (uintptr_t)1U)) { | ||
| 2555 | alloc_ctx.slab = (usize <= SC_SMALL_MAXCLASS); | ||
| 2556 | allocation = imalloc_no_sample( | ||
| 2557 | sopts, dopts, tsd, usize, usize, ind); | ||
| 2558 | } else if ((uintptr_t)tctx > (uintptr_t)1U) { | ||
| 2559 | allocation = imalloc_sample( | ||
| 2560 | sopts, dopts, tsd, usize, ind); | ||
| 2561 | alloc_ctx.slab = false; | ||
| 2562 | } else { | ||
| 2563 | allocation = NULL; | ||
| 2564 | } | ||
| 2565 | |||
| 2566 | if (unlikely(allocation == NULL)) { | ||
| 2567 | prof_alloc_rollback(tsd, tctx); | ||
| 2568 | goto label_oom; | ||
| 2569 | } | ||
| 2570 | prof_malloc(tsd, allocation, size, usize, &alloc_ctx, tctx); | ||
| 2571 | } else { | ||
| 2572 | assert(!opt_prof); | ||
| 2573 | allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, | ||
| 2574 | ind); | ||
| 2575 | if (unlikely(allocation == NULL)) { | ||
| 2576 | goto label_oom; | ||
| 2577 | } | ||
| 2578 | } | ||
| 2579 | |||
| 2580 | /* | ||
| 2581 | * Allocation has been done at this point. We still have some | ||
| 2582 | * post-allocation work to do though. | ||
| 2583 | */ | ||
| 2584 | |||
| 2585 | thread_alloc_event(tsd, usize); | ||
| 2586 | |||
| 2587 | assert(dopts->alignment == 0 | ||
| 2588 | || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); | ||
| 2589 | |||
| 2590 | assert(usize == isalloc(tsd_tsdn(tsd), allocation)); | ||
| 2591 | |||
| 2592 | if (config_fill && sopts->slow && !dopts->zero | ||
| 2593 | && unlikely(opt_junk_alloc)) { | ||
| 2594 | junk_alloc_callback(allocation, usize); | ||
| 2595 | } | ||
| 2596 | |||
| 2597 | if (sopts->slow) { | ||
| 2598 | UTRACE(0, size, allocation); | ||
| 2599 | } | ||
| 2600 | |||
| 2601 | /* Success! */ | ||
| 2602 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 2603 | *dopts->result = allocation; | ||
| 2604 | return 0; | ||
| 2605 | |||
| 2606 | label_oom: | ||
| 2607 | if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { | ||
| 2608 | malloc_write(sopts->oom_string); | ||
| 2609 | abort(); | ||
| 2610 | } | ||
| 2611 | |||
| 2612 | if (sopts->slow) { | ||
| 2613 | UTRACE(NULL, size, NULL); | ||
| 2614 | } | ||
| 2615 | |||
| 2616 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 2617 | |||
| 2618 | if (sopts->set_errno_on_error) { | ||
| 2619 | set_errno(ENOMEM); | ||
| 2620 | } | ||
| 2621 | |||
| 2622 | if (sopts->null_out_result_on_error) { | ||
| 2623 | *dopts->result = NULL; | ||
| 2624 | } | ||
| 2625 | |||
| 2626 | return ENOMEM; | ||
| 2627 | |||
| 2628 | /* | ||
| 2629 | * This label is only jumped to by one goto; we move it out of line | ||
| 2630 | * anyway to avoid obscuring the non-error paths, and for symmetry with | ||
| 2631 | * the oom case. | ||
| 2632 | */ | ||
| 2633 | label_invalid_alignment: | ||
| 2634 | if (config_xmalloc && unlikely(opt_xmalloc)) { | ||
| 2635 | malloc_write(sopts->invalid_alignment_string); | ||
| 2636 | abort(); | ||
| 2637 | } | ||
| 2638 | |||
| 2639 | if (sopts->set_errno_on_error) { | ||
| 2640 | set_errno(EINVAL); | ||
| 2641 | } | ||
| 2642 | |||
| 2643 | if (sopts->slow) { | ||
| 2644 | UTRACE(NULL, size, NULL); | ||
| 2645 | } | ||
| 2646 | |||
| 2647 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 2648 | |||
| 2649 | if (sopts->null_out_result_on_error) { | ||
| 2650 | *dopts->result = NULL; | ||
| 2651 | } | ||
| 2652 | |||
| 2653 | return EINVAL; | ||
| 2654 | } | ||
| 2655 | |||
| 2656 | JEMALLOC_ALWAYS_INLINE bool | ||
| 2657 | imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) { | ||
| 2658 | if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { | ||
| 2659 | if (config_xmalloc && unlikely(opt_xmalloc)) { | ||
| 2660 | malloc_write(sopts->oom_string); | ||
| 2661 | abort(); | ||
| 2662 | } | ||
| 2663 | UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); | ||
| 2664 | set_errno(ENOMEM); | ||
| 2665 | *dopts->result = NULL; | ||
| 2666 | |||
| 2667 | return false; | ||
| 2668 | } | ||
| 2669 | |||
| 2670 | return true; | ||
| 2671 | } | ||
| 2672 | |||
| 2673 | /* Returns the errno-style error code of the allocation. */ | ||
| 2674 | JEMALLOC_ALWAYS_INLINE int | ||
| 2675 | imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { | ||
| 2676 | if (tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) { | ||
| 2677 | return ENOMEM; | ||
| 2678 | } | ||
| 2679 | |||
| 2680 | /* We always need the tsd. Let's grab it right away. */ | ||
| 2681 | tsd_t *tsd = tsd_fetch(); | ||
| 2682 | assert(tsd); | ||
| 2683 | if (likely(tsd_fast(tsd))) { | ||
| 2684 | /* Fast and common path. */ | ||
| 2685 | tsd_assert_fast(tsd); | ||
| 2686 | sopts->slow = false; | ||
| 2687 | return imalloc_body(sopts, dopts, tsd); | ||
| 2688 | } else { | ||
| 2689 | if (!tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) { | ||
| 2690 | return ENOMEM; | ||
| 2691 | } | ||
| 2692 | |||
| 2693 | sopts->slow = true; | ||
| 2694 | return imalloc_body(sopts, dopts, tsd); | ||
| 2695 | } | ||
| 2696 | } | ||
| 2697 | |||
| 2698 | JEMALLOC_NOINLINE | ||
| 2699 | void * | ||
| 2700 | malloc_default(size_t size, size_t *usize) { | ||
| 2701 | void *ret; | ||
| 2702 | static_opts_t sopts; | ||
| 2703 | dynamic_opts_t dopts; | ||
| 2704 | |||
| 2705 | /* | ||
| 2706 | * This variant has the logging hook on exit but not on entry. It's called | ||
| 2707 | * only by je_malloc, below, which emits the entry log for us (and, if | ||
| 2708 | * it calls us, does so only via tail call). | ||
| 2709 | */ | ||
| 2710 | |||
| 2711 | static_opts_init(&sopts); | ||
| 2712 | dynamic_opts_init(&dopts); | ||
| 2713 | |||
| 2714 | sopts.null_out_result_on_error = true; | ||
| 2715 | sopts.set_errno_on_error = true; | ||
| 2716 | sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n"; | ||
| 2717 | |||
| 2718 | dopts.result = &ret; | ||
| 2719 | dopts.num_items = 1; | ||
| 2720 | dopts.item_size = size; | ||
| 2721 | |||
| 2722 | imalloc(&sopts, &dopts); | ||
| 2723 | /* | ||
| 2724 | * Note that this branch gets optimized away -- it immediately follows | ||
| 2725 | * the check on tsd_fast that sets sopts.slow. | ||
| 2726 | */ | ||
| 2727 | if (sopts.slow) { | ||
| 2728 | uintptr_t args[3] = {(uintptr_t)size}; | ||
| 2729 | hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args); | ||
| 2730 | } | ||
| 2731 | |||
| 2732 | LOG("core.malloc.exit", "result: %p", ret); | ||
| 2733 | |||
| 2734 | if (usize) *usize = dopts.usize; | ||
| 2735 | return ret; | ||
| 2736 | } | ||
| 2737 | |||
| 2738 | /******************************************************************************/ | ||
| 2739 | /* | ||
| 2740 | * Begin malloc(3)-compatible functions. | ||
| 2741 | */ | ||
| 2742 | |||
| 2743 | static inline void *je_malloc_internal(size_t size, size_t *usize) { | ||
| 2744 | return imalloc_fastpath(size, &malloc_default, usize); | ||
| 2745 | } | ||
| 2746 | |||
| 2747 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 2748 | void JEMALLOC_NOTHROW * | ||
| 2749 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) | ||
| 2750 | je_malloc(size_t size) { | ||
| 2751 | return je_malloc_internal(size, NULL); | ||
| 2752 | } | ||
| 2753 | |||
| 2754 | JEMALLOC_EXPORT int JEMALLOC_NOTHROW | ||
| 2755 | JEMALLOC_ATTR(nonnull(1)) | ||
| 2756 | je_posix_memalign(void **memptr, size_t alignment, size_t size) { | ||
| 2757 | int ret; | ||
| 2758 | static_opts_t sopts; | ||
| 2759 | dynamic_opts_t dopts; | ||
| 2760 | |||
| 2761 | LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, " | ||
| 2762 | "size: %zu", memptr, alignment, size); | ||
| 2763 | |||
| 2764 | static_opts_init(&sopts); | ||
| 2765 | dynamic_opts_init(&dopts); | ||
| 2766 | |||
| 2767 | sopts.bump_empty_aligned_alloc = true; | ||
| 2768 | sopts.min_alignment = sizeof(void *); | ||
| 2769 | sopts.oom_string = | ||
| 2770 | "<jemalloc>: Error allocating aligned memory: out of memory\n"; | ||
| 2771 | sopts.invalid_alignment_string = | ||
| 2772 | "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; | ||
| 2773 | |||
| 2774 | dopts.result = memptr; | ||
| 2775 | dopts.num_items = 1; | ||
| 2776 | dopts.item_size = size; | ||
| 2777 | dopts.alignment = alignment; | ||
| 2778 | |||
| 2779 | ret = imalloc(&sopts, &dopts); | ||
| 2780 | if (sopts.slow) { | ||
| 2781 | uintptr_t args[3] = {(uintptr_t)memptr, (uintptr_t)alignment, | ||
| 2782 | (uintptr_t)size}; | ||
| 2783 | hook_invoke_alloc(hook_alloc_posix_memalign, *memptr, | ||
| 2784 | (uintptr_t)ret, args); | ||
| 2785 | } | ||
| 2786 | |||
| 2787 | LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret, | ||
| 2788 | *memptr); | ||
| 2789 | |||
| 2790 | return ret; | ||
| 2791 | } | ||
| 2792 | |||
| 2793 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 2794 | void JEMALLOC_NOTHROW * | ||
| 2795 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) | ||
| 2796 | je_aligned_alloc(size_t alignment, size_t size) { | ||
| 2797 | void *ret; | ||
| 2798 | |||
| 2799 | static_opts_t sopts; | ||
| 2800 | dynamic_opts_t dopts; | ||
| 2801 | |||
| 2802 | LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n", | ||
| 2803 | alignment, size); | ||
| 2804 | |||
| 2805 | static_opts_init(&sopts); | ||
| 2806 | dynamic_opts_init(&dopts); | ||
| 2807 | |||
| 2808 | sopts.bump_empty_aligned_alloc = true; | ||
| 2809 | sopts.null_out_result_on_error = true; | ||
| 2810 | sopts.set_errno_on_error = true; | ||
| 2811 | sopts.min_alignment = 1; | ||
| 2812 | sopts.oom_string = | ||
| 2813 | "<jemalloc>: Error allocating aligned memory: out of memory\n"; | ||
| 2814 | sopts.invalid_alignment_string = | ||
| 2815 | "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; | ||
| 2816 | |||
| 2817 | dopts.result = &ret; | ||
| 2818 | dopts.num_items = 1; | ||
| 2819 | dopts.item_size = size; | ||
| 2820 | dopts.alignment = alignment; | ||
| 2821 | |||
| 2822 | imalloc(&sopts, &dopts); | ||
| 2823 | if (sopts.slow) { | ||
| 2824 | uintptr_t args[3] = {(uintptr_t)alignment, (uintptr_t)size}; | ||
| 2825 | hook_invoke_alloc(hook_alloc_aligned_alloc, ret, | ||
| 2826 | (uintptr_t)ret, args); | ||
| 2827 | } | ||
| 2828 | |||
| 2829 | LOG("core.aligned_alloc.exit", "result: %p", ret); | ||
| 2830 | |||
| 2831 | return ret; | ||
| 2832 | } | ||
| 2833 | |||
| 2834 | static void *je_calloc_internal(size_t num, size_t size, size_t *usize) { | ||
| 2835 | void *ret; | ||
| 2836 | static_opts_t sopts; | ||
| 2837 | dynamic_opts_t dopts; | ||
| 2838 | |||
| 2839 | LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size); | ||
| 2840 | |||
| 2841 | static_opts_init(&sopts); | ||
| 2842 | dynamic_opts_init(&dopts); | ||
| 2843 | |||
| 2844 | sopts.may_overflow = true; | ||
| 2845 | sopts.null_out_result_on_error = true; | ||
| 2846 | sopts.set_errno_on_error = true; | ||
| 2847 | sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n"; | ||
| 2848 | |||
| 2849 | dopts.result = &ret; | ||
| 2850 | dopts.num_items = num; | ||
| 2851 | dopts.item_size = size; | ||
| 2852 | dopts.zero = true; | ||
| 2853 | |||
| 2854 | imalloc(&sopts, &dopts); | ||
| 2855 | if (sopts.slow) { | ||
| 2856 | uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size}; | ||
| 2857 | hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args); | ||
| 2858 | } | ||
| 2859 | |||
| 2860 | LOG("core.calloc.exit", "result: %p", ret); | ||
| 2861 | |||
| 2862 | if (usize) *usize = dopts.usize; | ||
| 2863 | return ret; | ||
| 2864 | } | ||
| 2865 | |||
| 2866 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 2867 | void JEMALLOC_NOTHROW * | ||
| 2868 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) | ||
| 2869 | je_calloc(size_t num, size_t size) { | ||
| 2870 | return je_calloc_internal(num, size, NULL); | ||
| 2871 | } | ||
| 2872 | |||
| 2873 | JEMALLOC_ALWAYS_INLINE void | ||
| 2874 | ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path, size_t *usable) { | ||
| 2875 | if (!slow_path) { | ||
| 2876 | tsd_assert_fast(tsd); | ||
| 2877 | } | ||
| 2878 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 2879 | if (tsd_reentrancy_level_get(tsd) != 0) { | ||
| 2880 | assert(slow_path); | ||
| 2881 | } | ||
| 2882 | |||
| 2883 | assert(ptr != NULL); | ||
| 2884 | assert(malloc_initialized() || IS_INITIALIZER); | ||
| 2885 | |||
| 2886 | emap_alloc_ctx_t alloc_ctx; | ||
| 2887 | emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, | ||
| 2888 | &alloc_ctx); | ||
| 2889 | assert(alloc_ctx.szind != SC_NSIZES); | ||
| 2890 | |||
| 2891 | size_t usize = sz_index2size(alloc_ctx.szind); | ||
| 2892 | if (config_prof && opt_prof) { | ||
| 2893 | prof_free(tsd, ptr, usize, &alloc_ctx); | ||
| 2894 | } | ||
| 2895 | |||
| 2896 | if (likely(!slow_path)) { | ||
| 2897 | idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, | ||
| 2898 | false); | ||
| 2899 | } else { | ||
| 2900 | if (config_fill && slow_path && opt_junk_free) { | ||
| 2901 | junk_free_callback(ptr, usize); | ||
| 2902 | } | ||
| 2903 | idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, | ||
| 2904 | true); | ||
| 2905 | } | ||
| 2906 | thread_dalloc_event(tsd, usize); | ||
| 2907 | if (usable) *usable = usize; | ||
| 2908 | } | ||
| 2909 | |||
| 2910 | JEMALLOC_ALWAYS_INLINE bool | ||
| 2911 | maybe_check_alloc_ctx(tsd_t *tsd, void *ptr, emap_alloc_ctx_t *alloc_ctx) { | ||
| 2912 | if (config_opt_size_checks) { | ||
| 2913 | emap_alloc_ctx_t dbg_ctx; | ||
| 2914 | emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, | ||
| 2915 | &dbg_ctx); | ||
| 2916 | if (alloc_ctx->szind != dbg_ctx.szind) { | ||
| 2917 | safety_check_fail_sized_dealloc( | ||
| 2918 | /* current_dealloc */ true, ptr, | ||
| 2919 | /* true_size */ sz_index2size(dbg_ctx.szind), | ||
| 2920 | /* input_size */ sz_index2size(alloc_ctx->szind)); | ||
| 2921 | return true; | ||
| 2922 | } | ||
| 2923 | if (alloc_ctx->slab != dbg_ctx.slab) { | ||
| 2924 | safety_check_fail( | ||
| 2925 | "Internal heap corruption detected: " | ||
| 2926 | "mismatch in slab bit"); | ||
| 2927 | return true; | ||
| 2928 | } | ||
| 2929 | } | ||
| 2930 | return false; | ||
| 2931 | } | ||
| 2932 | |||
| 2933 | JEMALLOC_ALWAYS_INLINE void | ||
| 2934 | isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { | ||
| 2935 | if (!slow_path) { | ||
| 2936 | tsd_assert_fast(tsd); | ||
| 2937 | } | ||
| 2938 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 2939 | if (tsd_reentrancy_level_get(tsd) != 0) { | ||
| 2940 | assert(slow_path); | ||
| 2941 | } | ||
| 2942 | |||
| 2943 | assert(ptr != NULL); | ||
| 2944 | assert(malloc_initialized() || IS_INITIALIZER); | ||
| 2945 | |||
| 2946 | emap_alloc_ctx_t alloc_ctx; | ||
| 2947 | if (!config_prof) { | ||
| 2948 | alloc_ctx.szind = sz_size2index(usize); | ||
| 2949 | alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS); | ||
| 2950 | } else { | ||
| 2951 | if (likely(!prof_sample_aligned(ptr))) { | ||
| 2952 | /* | ||
| 2953 | * When the ptr is not page aligned, it was not sampled. | ||
| 2954 | * usize can be trusted to determine szind and slab. | ||
| 2955 | */ | ||
| 2956 | alloc_ctx.szind = sz_size2index(usize); | ||
| 2957 | alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS); | ||
| 2958 | } else if (opt_prof) { | ||
| 2959 | emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, | ||
| 2960 | ptr, &alloc_ctx); | ||
| 2961 | |||
| 2962 | if (config_opt_safety_checks) { | ||
| 2963 | /* Small alloc may have !slab (sampled). */ | ||
| 2964 | if (unlikely(alloc_ctx.szind != | ||
| 2965 | sz_size2index(usize))) { | ||
| 2966 | safety_check_fail_sized_dealloc( | ||
| 2967 | /* current_dealloc */ true, ptr, | ||
| 2968 | /* true_size */ sz_index2size( | ||
| 2969 | alloc_ctx.szind), | ||
| 2970 | /* input_size */ usize); | ||
| 2971 | } | ||
| 2972 | } | ||
| 2973 | } else { | ||
| 2974 | alloc_ctx.szind = sz_size2index(usize); | ||
| 2975 | alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS); | ||
| 2976 | } | ||
| 2977 | } | ||
| 2978 | bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx); | ||
| 2979 | if (fail) { | ||
| 2980 | /* | ||
| 2981 | * This is a heap corruption bug. In real life we'll crash; for | ||
| 2982 | * the unit test we just want to avoid breaking anything too | ||
| 2983 | * badly to get a test result out. Let's leak instead of trying | ||
| 2984 | * to free. | ||
| 2985 | */ | ||
| 2986 | return; | ||
| 2987 | } | ||
| 2988 | |||
| 2989 | if (config_prof && opt_prof) { | ||
| 2990 | prof_free(tsd, ptr, usize, &alloc_ctx); | ||
| 2991 | } | ||
| 2992 | if (likely(!slow_path)) { | ||
| 2993 | isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx, | ||
| 2994 | false); | ||
| 2995 | } else { | ||
| 2996 | if (config_fill && slow_path && opt_junk_free) { | ||
| 2997 | junk_free_callback(ptr, usize); | ||
| 2998 | } | ||
| 2999 | isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx, | ||
| 3000 | true); | ||
| 3001 | } | ||
| 3002 | thread_dalloc_event(tsd, usize); | ||
| 3003 | } | ||
| 3004 | |||
| 3005 | JEMALLOC_NOINLINE | ||
| 3006 | void | ||
| 3007 | free_default(void *ptr, size_t *usize) { | ||
| 3008 | UTRACE(ptr, 0, 0); | ||
| 3009 | if (likely(ptr != NULL)) { | ||
| 3010 | /* | ||
| 3011 | * We avoid setting up tsd fully (e.g. tcache, arena binding) | ||
| 3012 | * based on only free() calls -- other activities trigger the | ||
| 3013 | * minimal to full transition. This is because free() may | ||
| 3014 | * happen during thread shutdown after TLS deallocation: if a | ||
| 3015 | * thread never had any malloc activity until then, a | ||
| 3016 | * fully-set-up tsd won't be destructed properly. | ||
| 3017 | */ | ||
| 3018 | tsd_t *tsd = tsd_fetch_min(); | ||
| 3019 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 3020 | |||
| 3021 | if (likely(tsd_fast(tsd))) { | ||
| 3022 | tcache_t *tcache = tcache_get_from_ind(tsd, | ||
| 3023 | TCACHE_IND_AUTOMATIC, /* slow */ false, | ||
| 3024 | /* is_alloc */ false); | ||
| 3025 | ifree(tsd, ptr, tcache, /* slow */ false, usize); | ||
| 3026 | } else { | ||
| 3027 | tcache_t *tcache = tcache_get_from_ind(tsd, | ||
| 3028 | TCACHE_IND_AUTOMATIC, /* slow */ true, | ||
| 3029 | /* is_alloc */ false); | ||
| 3030 | uintptr_t args_raw[3] = {(uintptr_t)ptr}; | ||
| 3031 | hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw); | ||
| 3032 | ifree(tsd, ptr, tcache, /* slow */ true, usize); | ||
| 3033 | } | ||
| 3034 | |||
| 3035 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 3036 | } | ||
| 3037 | } | ||
| 3038 | |||
| 3039 | JEMALLOC_ALWAYS_INLINE bool | ||
| 3040 | free_fastpath_nonfast_aligned(void *ptr, bool check_prof) { | ||
| 3041 | /* | ||
| 3042 | * free_fastpath does not handle two uncommon cases: 1) sampled | ||
| 3043 | * (profiled) objects and 2) sampled junk & stash for use-after-free | ||
| 3044 | * detection. Both have special alignments which are used to escape | ||
| 3045 | * the fastpath. prof_sample is page-aligned, which covers the UAF | ||
| 3046 | * check when both are enabled (the assertion below). Since this is | ||
| 3047 | * the fastpath, we avoid redundant checks -- at most one runtime | ||
| 3048 | * branch results from this. | ||
| 3049 | */ | ||
| 3050 | if (config_debug && cache_bin_nonfast_aligned(ptr)) { | ||
| 3051 | assert(prof_sample_aligned(ptr)); | ||
| 3052 | } | ||
| 3053 | |||
| 3054 | if (config_prof && check_prof) { | ||
| 3055 | /* When prof is enabled, the prof_sample alignment is enough. */ | ||
| 3056 | if (prof_sample_aligned(ptr)) { | ||
| 3057 | return true; | ||
| 3058 | } else { | ||
| 3059 | return false; | ||
| 3060 | } | ||
| 3061 | } | ||
| 3062 | |||
| 3063 | if (config_uaf_detection) { | ||
| 3064 | if (cache_bin_nonfast_aligned(ptr)) { | ||
| 3065 | return true; | ||
| 3066 | } else { | ||
| 3067 | return false; | ||
| 3068 | } | ||
| 3069 | } | ||
| 3070 | |||
| 3071 | return false; | ||
| 3072 | } | ||
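| | |||
| | /* | ||
| | * As a worked example of the alignment trick (assuming the usual | ||
| | * page-mask definition of prof_sample_aligned() and 4 KiB pages), a | ||
| | * sampled pointer is recognized simply as: | ||
| | * | ||
| | *   ((uintptr_t)ptr & (PAGE - 1)) == 0 | ||
| | * | ||
| | * NULL is trivially page-aligned, which is how the size-hinted caller | ||
| | * below also escapes to the slow path on a NULL ptr. | ||
| | */ | ||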
| 3073 | |||
| 3074 | /* Returns whether or not the free attempt was successful. */ | ||
| 3075 | JEMALLOC_ALWAYS_INLINE | ||
| 3076 | bool free_fastpath(void *ptr, size_t size, bool size_hint, size_t *usable_size) { | ||
| 3077 | tsd_t *tsd = tsd_get(false); | ||
| 3078 | /* The branch gets optimized away unless tsd_get_allocates(). */ | ||
| 3079 | if (unlikely(tsd == NULL)) { | ||
| 3080 | return false; | ||
| 3081 | } | ||
| 3082 | /* | ||
| 3083 | * The tsd_fast() / initialized checks are folded into the branch | ||
| 3084 | * testing (deallocated_after >= threshold) later in this function. | ||
| 3085 | * The threshold will be set to 0 when !tsd_fast. | ||
| 3086 | */ | ||
| 3087 | assert(tsd_fast(tsd) || | ||
| 3088 | *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) == 0); | ||
| 3089 | |||
| 3090 | emap_alloc_ctx_t alloc_ctx; | ||
| 3091 | if (!size_hint) { | ||
| 3092 | bool err = emap_alloc_ctx_try_lookup_fast(tsd, | ||
| 3093 | &arena_emap_global, ptr, &alloc_ctx); | ||
| 3094 | |||
| 3095 | /* Note: profiled objects will have alloc_ctx.slab set */ | ||
| 3096 | if (unlikely(err || !alloc_ctx.slab || | ||
| 3097 | free_fastpath_nonfast_aligned(ptr, | ||
| 3098 | /* check_prof */ false))) { | ||
| 3099 | return false; | ||
| 3100 | } | ||
| 3101 | assert(alloc_ctx.szind != SC_NSIZES); | ||
| 3102 | } else { | ||
| 3103 | /* | ||
| 3104 | * Check for both sizes that are too large, and for sampled / | ||
| 3105 | * special aligned objects. The alignment check will also check | ||
| 3106 | * for null ptr. | ||
| 3107 | */ | ||
| 3108 | if (unlikely(size > SC_LOOKUP_MAXCLASS || | ||
| 3109 | free_fastpath_nonfast_aligned(ptr, | ||
| 3110 | /* check_prof */ true))) { | ||
| 3111 | return false; | ||
| 3112 | } | ||
| 3113 | alloc_ctx.szind = sz_size2index_lookup(size); | ||
| 3114 | /* Max lookup class must be small. */ | ||
| 3115 | assert(alloc_ctx.szind < SC_NBINS); | ||
| 3116 | /* This is a dead store, except when opt size checking is on. */ | ||
| 3117 | alloc_ctx.slab = true; | ||
| 3118 | } | ||
| 3119 | /* | ||
| 3120 | * Currently the fastpath only handles small sizes. The branch on | ||
| 3121 | * SC_LOOKUP_MAXCLASS makes sure of it. This lets us avoid checking | ||
| 3122 | * tcache szind upper limit (i.e. tcache_maxclass) as well. | ||
| 3123 | */ | ||
| 3124 | assert(alloc_ctx.slab); | ||
| 3125 | |||
| 3126 | uint64_t deallocated, threshold; | ||
| 3127 | te_free_fastpath_ctx(tsd, &deallocated, &threshold); | ||
| 3128 | |||
| 3129 | size_t usize = sz_index2size(alloc_ctx.szind); | ||
| 3130 | uint64_t deallocated_after = deallocated + usize; | ||
| 3131 | /* | ||
| 3132 | * Check for events and tsd non-nominal (fast_threshold will be set to | ||
| 3133 | * 0) in a single branch. Note that this handles the uninitialized case | ||
| 3134 | * as well (TSD init will be triggered on the non-fastpath). Therefore, | ||
| 3135 | * anything that depends on a functional TSD (e.g. the alloc_ctx sanity | ||
| 3136 | * check below) needs to happen after this branch. | ||
| 3137 | */ | ||
| 3138 | if (unlikely(deallocated_after >= threshold)) { | ||
| 3139 | return false; | ||
| 3140 | } | ||
| 3141 | assert(tsd_fast(tsd)); | ||
| 3142 | bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx); | ||
| 3143 | if (fail) { | ||
| 3144 | /* See the comment in isfree. */ | ||
| 3145 | if (usable_size) *usable_size = usize; | ||
| 3146 | return true; | ||
| 3147 | } | ||
| 3148 | |||
| 3149 | tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC, | ||
| 3150 | /* slow */ false, /* is_alloc */ false); | ||
| 3151 | cache_bin_t *bin = &tcache->bins[alloc_ctx.szind]; | ||
| 3152 | |||
| 3153 | /* | ||
| 3154 | * If junking were enabled, this is where we would do it. It's not | ||
| 3155 | * though, since we ensured above that we're on the fast path. Assert | ||
| 3156 | * that to double-check. | ||
| 3157 | */ | ||
| 3158 | assert(!opt_junk_free); | ||
| 3159 | |||
| 3160 | if (!cache_bin_dalloc_easy(bin, ptr)) { | ||
| 3161 | return false; | ||
| 3162 | } | ||
| 3163 | |||
| 3164 | *tsd_thread_deallocatedp_get(tsd) = deallocated_after; | ||
| 3165 | |||
| 3166 | if (usable_size) *usable_size = usize; | ||
| 3167 | return true; | ||
| 3168 | } | ||
| 3169 | |||
| 3170 | static inline void je_free_internal(void *ptr, size_t *usize) { | ||
| 3171 | LOG("core.free.entry", "ptr: %p", ptr); | ||
| 3172 | |||
| 3173 | if (!free_fastpath(ptr, 0, false, usize)) { | ||
| 3174 | free_default(ptr, usize); | ||
| 3175 | } | ||
| 3176 | |||
| 3177 | LOG("core.free.exit", ""); | ||
| 3178 | } | ||
| 3179 | |||
| 3180 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW | ||
| 3181 | je_free(void *ptr) { | ||
| 3182 | je_free_internal(ptr, NULL); | ||
| 3183 | } | ||
| 3184 | |||
| 3185 | /* | ||
| 3186 | * End malloc(3)-compatible functions. | ||
| 3187 | */ | ||
| 3188 | /******************************************************************************/ | ||
| 3189 | /* | ||
| 3190 | * Begin non-standard override functions. | ||
| 3191 | */ | ||
| 3192 | |||
| 3193 | #ifdef JEMALLOC_OVERRIDE_MEMALIGN | ||
| 3194 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 3195 | void JEMALLOC_NOTHROW * | ||
| 3196 | JEMALLOC_ATTR(malloc) | ||
| 3197 | je_memalign(size_t alignment, size_t size) { | ||
| 3198 | void *ret; | ||
| 3199 | static_opts_t sopts; | ||
| 3200 | dynamic_opts_t dopts; | ||
| 3201 | |||
| 3202 | LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment, | ||
| 3203 | size); | ||
| 3204 | |||
| 3205 | static_opts_init(&sopts); | ||
| 3206 | dynamic_opts_init(&dopts); | ||
| 3207 | |||
| 3208 | sopts.min_alignment = 1; | ||
| 3209 | sopts.oom_string = | ||
| 3210 | "<jemalloc>: Error allocating aligned memory: out of memory\n"; | ||
| 3211 | sopts.invalid_alignment_string = | ||
| 3212 | "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; | ||
| 3213 | sopts.null_out_result_on_error = true; | ||
| 3214 | |||
| 3215 | dopts.result = &ret; | ||
| 3216 | dopts.num_items = 1; | ||
| 3217 | dopts.item_size = size; | ||
| 3218 | dopts.alignment = alignment; | ||
| 3219 | |||
| 3220 | imalloc(&sopts, &dopts); | ||
| 3221 | if (sopts.slow) { | ||
| 3222 | uintptr_t args[3] = {alignment, size}; | ||
| 3223 | hook_invoke_alloc(hook_alloc_memalign, ret, (uintptr_t)ret, | ||
| 3224 | args); | ||
| 3225 | } | ||
| 3226 | |||
| 3227 | LOG("core.memalign.exit", "result: %p", ret); | ||
| 3228 | return ret; | ||
| 3229 | } | ||
| 3230 | #endif | ||
| 3231 | |||
| 3232 | #ifdef JEMALLOC_OVERRIDE_VALLOC | ||
| 3233 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 3234 | void JEMALLOC_NOTHROW * | ||
| 3235 | JEMALLOC_ATTR(malloc) | ||
| 3236 | je_valloc(size_t size) { | ||
| 3237 | void *ret; | ||
| 3238 | |||
| 3239 | static_opts_t sopts; | ||
| 3240 | dynamic_opts_t dopts; | ||
| 3241 | |||
| 3242 | LOG("core.valloc.entry", "size: %zu\n", size); | ||
| 3243 | |||
| 3244 | static_opts_init(&sopts); | ||
| 3245 | dynamic_opts_init(&dopts); | ||
| 3246 | |||
| 3247 | sopts.null_out_result_on_error = true; | ||
| 3248 | sopts.min_alignment = PAGE; | ||
| 3249 | sopts.oom_string = | ||
| 3250 | "<jemalloc>: Error allocating aligned memory: out of memory\n"; | ||
| 3251 | sopts.invalid_alignment_string = | ||
| 3252 | "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; | ||
| 3253 | |||
| 3254 | dopts.result = &ret; | ||
| 3255 | dopts.num_items = 1; | ||
| 3256 | dopts.item_size = size; | ||
| 3257 | dopts.alignment = PAGE; | ||
| 3258 | |||
| 3259 | imalloc(&sopts, &dopts); | ||
| 3260 | if (sopts.slow) { | ||
| 3261 | uintptr_t args[3] = {size}; | ||
| 3262 | hook_invoke_alloc(hook_alloc_valloc, ret, (uintptr_t)ret, args); | ||
| 3263 | } | ||
| 3264 | |||
| 3265 | LOG("core.valloc.exit", "result: %p\n", ret); | ||
| 3266 | return ret; | ||
| 3267 | } | ||
| 3268 | #endif | ||
| 3269 | |||
| 3270 | #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) | ||
| 3271 | /* | ||
| 3272 | * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible | ||
| 3273 | * to inconsistently reference libc's malloc(3)-compatible functions | ||
| 3274 | * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). | ||
| 3275 | * | ||
| 3276 | * These definitions interpose hooks in glibc. The functions are actually | ||
| 3277 | * passed an extra argument for the caller return address, which will be | ||
| 3278 | * ignored. | ||
| 3279 | */ | ||
| 3280 | #include <features.h> /* Defines __GLIBC__ if we are compiling against glibc. */ | ||
| 3281 | |||
| 3282 | JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; | ||
| 3283 | JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; | ||
| 3284 | JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; | ||
| 3285 | # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK | ||
| 3286 | JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = | ||
| 3287 | je_memalign; | ||
| 3288 | # endif | ||
| 3289 | |||
| 3290 | # ifdef __GLIBC__ | ||
| 3291 | /* | ||
| 3292 | * To enable static linking with glibc, the libc-specific malloc | ||
| 3293 | * interface must be implemented as well, so that none of glibc's | ||
| 3294 | * malloc.o functions are pulled into the link. | ||
| 3295 | */ | ||
| 3296 | # define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) | ||
| 3297 | /* To force macro expansion of je_ prefix before stringification. */ | ||
| 3298 | # define PREALIAS(je_fn) ALIAS(je_fn) | ||
| 3299 | # ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC | ||
| 3300 | void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); | ||
| 3301 | # endif | ||
| 3302 | # ifdef JEMALLOC_OVERRIDE___LIBC_FREE | ||
| 3303 | void __libc_free(void* ptr) PREALIAS(je_free); | ||
| 3304 | # endif | ||
| 3305 | # ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC | ||
| 3306 | void *__libc_malloc(size_t size) PREALIAS(je_malloc); | ||
| 3307 | # endif | ||
| 3308 | # ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN | ||
| 3309 | void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); | ||
| 3310 | # endif | ||
| 3311 | # ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC | ||
| 3312 | void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); | ||
| 3313 | # endif | ||
| 3314 | # ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC | ||
| 3315 | void *__libc_valloc(size_t size) PREALIAS(je_valloc); | ||
| 3316 | # endif | ||
| 3317 | # ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN | ||
| 3318 | int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); | ||
| 3319 | # endif | ||
| 3320 | # undef PREALIAS | ||
| 3321 | # undef ALIAS | ||
| 3322 | # endif | ||
| 3323 | #endif | ||
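| | |||
| | /* | ||
| | * For reference, with the je_ prefix mapped to the plain malloc names, | ||
| | * the __libc_malloc definition above expands to a link-time alias: | ||
| | * | ||
| | *   void *__libc_malloc(size_t size) | ||
| | *       __attribute__((alias ("malloc"), used)); | ||
| | * | ||
| | * PREALIAS exists solely so that the je_ prefix macro is expanded | ||
| | * before ALIAS stringifies the name. | ||
| | */ | ||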
| 3324 | |||
| 3325 | /* | ||
| 3326 | * End non-standard override functions. | ||
| 3327 | */ | ||
| 3328 | /******************************************************************************/ | ||
| 3329 | /* | ||
| 3330 | * Begin non-standard functions. | ||
| 3331 | */ | ||
| 3332 | |||
| 3333 | JEMALLOC_ALWAYS_INLINE unsigned | ||
| 3334 | mallocx_tcache_get(int flags) { | ||
| 3335 | if (likely((flags & MALLOCX_TCACHE_MASK) == 0)) { | ||
| 3336 | return TCACHE_IND_AUTOMATIC; | ||
| 3337 | } else if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { | ||
| 3338 | return TCACHE_IND_NONE; | ||
| 3339 | } else { | ||
| 3340 | return MALLOCX_TCACHE_GET(flags); | ||
| 3341 | } | ||
| 3342 | } | ||
| 3343 | |||
| 3344 | JEMALLOC_ALWAYS_INLINE unsigned | ||
| 3345 | mallocx_arena_get(int flags) { | ||
| 3346 | if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { | ||
| 3347 | return MALLOCX_ARENA_GET(flags); | ||
| 3348 | } else { | ||
| 3349 | return ARENA_IND_AUTOMATIC; | ||
| 3350 | } | ||
| 3351 | } | ||
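| | |||
| | /* | ||
| | * A minimal sketch of how callers reach these helpers through the | ||
| | * public flag macros in <jemalloc/jemalloc.h> (illustrative only; | ||
| | * arena_ind would typically come from an "arenas.create" mallctl). | ||
| | * MALLOCX_TCACHE_NONE decodes to TCACHE_IND_NONE above, and | ||
| | * MALLOCX_ARENA(arena_ind) to that arena's index: | ||
| | * | ||
| | *   void * | ||
| | *   alloc_in_arena(size_t size, unsigned arena_ind) { | ||
| | *           return mallocx(size, | ||
| | *               MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind)); | ||
| | *   } | ||
| | */ | ||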
| 3352 | |||
| 3353 | #ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API | ||
| 3354 | |||
| 3355 | #define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y | ||
| 3356 | #define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y) \ | ||
| 3357 | JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) | ||
| 3358 | |||
| 3359 | typedef struct { | ||
| 3360 | void *ptr; | ||
| 3361 | size_t size; | ||
| 3362 | } smallocx_return_t; | ||
| 3363 | |||
| 3364 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 3365 | smallocx_return_t JEMALLOC_NOTHROW | ||
| 3366 | /* | ||
| 3367 | * The attribute JEMALLOC_ATTR(malloc) cannot be used due to: | ||
| 3368 | * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86488 | ||
| 3369 | */ | ||
| 3370 | JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT) | ||
| 3371 | (size_t size, int flags) { | ||
| 3372 | /* | ||
| 3373 | * Note: the attribute JEMALLOC_ALLOC_SIZE(1) cannot be | ||
| 3374 | * used here because it makes writing beyond the `size` | ||
| 3375 | * of the `ptr` undefined behavior, but the objective | ||
| 3376 | * of this function is to allow writing beyond `size` | ||
| 3377 | * up to `smallocx_return_t::size`. | ||
| 3378 | */ | ||
| 3379 | smallocx_return_t ret; | ||
| 3380 | static_opts_t sopts; | ||
| 3381 | dynamic_opts_t dopts; | ||
| 3382 | |||
| 3383 | LOG("core.smallocx.entry", "size: %zu, flags: %d", size, flags); | ||
| 3384 | |||
| 3385 | static_opts_init(&sopts); | ||
| 3386 | dynamic_opts_init(&dopts); | ||
| 3387 | |||
| 3388 | sopts.assert_nonempty_alloc = true; | ||
| 3389 | sopts.null_out_result_on_error = true; | ||
| 3390 | sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n"; | ||
| 3391 | sopts.usize = true; | ||
| 3392 | |||
| 3393 | dopts.result = &ret.ptr; | ||
| 3394 | dopts.num_items = 1; | ||
| 3395 | dopts.item_size = size; | ||
| 3396 | if (unlikely(flags != 0)) { | ||
| 3397 | dopts.alignment = MALLOCX_ALIGN_GET(flags); | ||
| 3398 | dopts.zero = MALLOCX_ZERO_GET(flags); | ||
| 3399 | dopts.tcache_ind = mallocx_tcache_get(flags); | ||
| 3400 | dopts.arena_ind = mallocx_arena_get(flags); | ||
| 3401 | } | ||
| 3402 | |||
| 3403 | imalloc(&sopts, &dopts); | ||
| 3404 | assert(dopts.usize == je_nallocx(size, flags)); | ||
| 3405 | ret.size = dopts.usize; | ||
| 3406 | |||
| 3407 | LOG("core.smallocx.exit", "result: %p, size: %zu", ret.ptr, ret.size); | ||
| 3408 | return ret; | ||
| 3409 | } | ||
| 3410 | #undef JEMALLOC_SMALLOCX_CONCAT_HELPER | ||
| 3411 | #undef JEMALLOC_SMALLOCX_CONCAT_HELPER2 | ||
| 3412 | #endif | ||
| 3413 | |||
| 3414 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 3415 | void JEMALLOC_NOTHROW * | ||
| 3416 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) | ||
| 3417 | je_mallocx(size_t size, int flags) { | ||
| 3418 | void *ret; | ||
| 3419 | static_opts_t sopts; | ||
| 3420 | dynamic_opts_t dopts; | ||
| 3421 | |||
| 3422 | LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags); | ||
| 3423 | |||
| 3424 | static_opts_init(&sopts); | ||
| 3425 | dynamic_opts_init(&dopts); | ||
| 3426 | |||
| 3427 | sopts.assert_nonempty_alloc = true; | ||
| 3428 | sopts.null_out_result_on_error = true; | ||
| 3429 | sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n"; | ||
| 3430 | |||
| 3431 | dopts.result = &ret; | ||
| 3432 | dopts.num_items = 1; | ||
| 3433 | dopts.item_size = size; | ||
| 3434 | if (unlikely(flags != 0)) { | ||
| 3435 | dopts.alignment = MALLOCX_ALIGN_GET(flags); | ||
| 3436 | dopts.zero = MALLOCX_ZERO_GET(flags); | ||
| 3437 | dopts.tcache_ind = mallocx_tcache_get(flags); | ||
| 3438 | dopts.arena_ind = mallocx_arena_get(flags); | ||
| 3439 | } | ||
| 3440 | |||
| 3441 | imalloc(&sopts, &dopts); | ||
| 3442 | if (sopts.slow) { | ||
| 3443 | uintptr_t args[3] = {size, flags}; | ||
| 3444 | hook_invoke_alloc(hook_alloc_mallocx, ret, (uintptr_t)ret, | ||
| 3445 | args); | ||
| 3446 | } | ||
| 3447 | |||
| 3448 | LOG("core.mallocx.exit", "result: %p", ret); | ||
| 3449 | return ret; | ||
| 3450 | } | ||
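| | |||
| | /* | ||
| | * Example usage through the public entry point (mallocx maps to | ||
| | * je_mallocx under the default prefix); nonzero flags exercise the | ||
| | * unlikely branch above that decodes alignment, zeroing, tcache and | ||
| | * arena: | ||
| | * | ||
| | *   double *v = mallocx(n * sizeof(double), | ||
| | *       MALLOCX_ALIGN(64) | MALLOCX_ZERO); | ||
| | */ | ||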
| 3451 | |||
| 3452 | static void * | ||
| 3453 | irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, | ||
| 3454 | size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, | ||
| 3455 | prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) { | ||
| 3456 | void *p; | ||
| 3457 | |||
| 3458 | if (tctx == NULL) { | ||
| 3459 | return NULL; | ||
| 3460 | } | ||
| 3461 | |||
| 3462 | alignment = prof_sample_align(alignment); | ||
| 3463 | if (usize <= SC_SMALL_MAXCLASS) { | ||
| 3464 | p = iralloct(tsdn, old_ptr, old_usize, | ||
| 3465 | SC_LARGE_MINCLASS, alignment, zero, tcache, | ||
| 3466 | arena, hook_args); | ||
| 3467 | if (p == NULL) { | ||
| 3468 | return NULL; | ||
| 3469 | } | ||
| 3470 | arena_prof_promote(tsdn, p, usize); | ||
| 3471 | } else { | ||
| 3472 | p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, | ||
| 3473 | tcache, arena, hook_args); | ||
| 3474 | } | ||
| 3475 | assert(prof_sample_aligned(p)); | ||
| 3476 | |||
| 3477 | return p; | ||
| 3478 | } | ||
| 3479 | |||
| 3480 | JEMALLOC_ALWAYS_INLINE void * | ||
| 3481 | irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, | ||
| 3482 | size_t alignment, size_t usize, bool zero, tcache_t *tcache, | ||
| 3483 | arena_t *arena, emap_alloc_ctx_t *alloc_ctx, | ||
| 3484 | hook_ralloc_args_t *hook_args) { | ||
| 3485 | prof_info_t old_prof_info; | ||
| 3486 | prof_info_get_and_reset_recent(tsd, old_ptr, alloc_ctx, &old_prof_info); | ||
| 3487 | bool prof_active = prof_active_get_unlocked(); | ||
| 3488 | bool sample_event = te_prof_sample_event_lookahead(tsd, usize); | ||
| 3489 | prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event); | ||
| 3490 | void *p; | ||
| 3491 | if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { | ||
| 3492 | p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, | ||
| 3493 | usize, alignment, zero, tcache, arena, tctx, hook_args); | ||
| 3494 | } else { | ||
| 3495 | p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, | ||
| 3496 | zero, tcache, arena, hook_args); | ||
| 3497 | } | ||
| 3498 | if (unlikely(p == NULL)) { | ||
| 3499 | prof_alloc_rollback(tsd, tctx); | ||
| 3500 | return NULL; | ||
| 3501 | } | ||
| 3502 | assert(usize == isalloc(tsd_tsdn(tsd), p)); | ||
| 3503 | prof_realloc(tsd, p, size, usize, tctx, prof_active, old_ptr, | ||
| 3504 | old_usize, &old_prof_info, sample_event); | ||
| 3505 | |||
| 3506 | return p; | ||
| 3507 | } | ||
| 3508 | |||
| 3509 | static void * | ||
| 3510 | do_rallocx(void *ptr, size_t size, int flags, bool is_realloc, size_t *old_usable_size, size_t *new_usable_size) { | ||
| 3511 | void *p; | ||
| 3512 | tsd_t *tsd; | ||
| 3513 | size_t usize; | ||
| 3514 | size_t old_usize; | ||
| 3515 | size_t alignment = MALLOCX_ALIGN_GET(flags); | ||
| 3516 | arena_t *arena; | ||
| 3517 | |||
| 3518 | assert(ptr != NULL); | ||
| 3519 | assert(size != 0); | ||
| 3520 | assert(malloc_initialized() || IS_INITIALIZER); | ||
| 3521 | tsd = tsd_fetch(); | ||
| 3522 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 3523 | |||
| 3524 | bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true); | ||
| 3525 | |||
| 3526 | unsigned arena_ind = mallocx_arena_get(flags); | ||
| 3527 | if (arena_get_from_ind(tsd, arena_ind, &arena)) { | ||
| 3528 | goto label_oom; | ||
| 3529 | } | ||
| 3530 | |||
| 3531 | unsigned tcache_ind = mallocx_tcache_get(flags); | ||
| 3532 | tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, | ||
| 3533 | /* slow */ true, /* is_alloc */ true); | ||
| 3534 | |||
| 3535 | emap_alloc_ctx_t alloc_ctx; | ||
| 3536 | emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, | ||
| 3537 | &alloc_ctx); | ||
| 3538 | assert(alloc_ctx.szind != SC_NSIZES); | ||
| 3539 | old_usize = sz_index2size(alloc_ctx.szind); | ||
| 3540 | assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); | ||
| 3541 | if (aligned_usize_get(size, alignment, &usize, NULL, false)) { | ||
| 3542 | goto label_oom; | ||
| 3543 | } | ||
| 3544 | |||
| 3545 | hook_ralloc_args_t hook_args = {is_realloc, {(uintptr_t)ptr, size, | ||
| 3546 | flags, 0}}; | ||
| 3547 | if (config_prof && opt_prof) { | ||
| 3548 | p = irallocx_prof(tsd, ptr, old_usize, size, alignment, usize, | ||
| 3549 | zero, tcache, arena, &alloc_ctx, &hook_args); | ||
| 3550 | if (unlikely(p == NULL)) { | ||
| 3551 | goto label_oom; | ||
| 3552 | } | ||
| 3553 | } else { | ||
| 3554 | p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, | ||
| 3555 | zero, tcache, arena, &hook_args); | ||
| 3556 | if (unlikely(p == NULL)) { | ||
| 3557 | goto label_oom; | ||
| 3558 | } | ||
| 3559 | assert(usize == isalloc(tsd_tsdn(tsd), p)); | ||
| 3560 | } | ||
| 3561 | assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); | ||
| 3562 | thread_alloc_event(tsd, usize); | ||
| 3563 | thread_dalloc_event(tsd, old_usize); | ||
| 3564 | |||
| 3565 | UTRACE(ptr, size, p); | ||
| 3566 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 3567 | |||
| 3568 | if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize | ||
| 3569 | && !zero) { | ||
| 3570 | size_t excess_len = usize - old_usize; | ||
| 3571 | void *excess_start = (void *)((uintptr_t)p + old_usize); | ||
| 3572 | junk_alloc_callback(excess_start, excess_len); | ||
| 3573 | } | ||
| 3574 | |||
| 3575 | if (old_usable_size) *old_usable_size = old_usize; | ||
| 3576 | if (new_usable_size) *new_usable_size = usize; | ||
| 3577 | return p; | ||
| 3578 | label_oom: | ||
| 3579 | if (config_xmalloc && unlikely(opt_xmalloc)) { | ||
| 3580 | malloc_write("<jemalloc>: Error in rallocx(): out of memory\n"); | ||
| 3581 | abort(); | ||
| 3582 | } | ||
| 3583 | UTRACE(ptr, size, 0); | ||
| 3584 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 3585 | |||
| 3586 | return NULL; | ||
| 3587 | } | ||
| 3588 | |||
| 3589 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 3590 | void JEMALLOC_NOTHROW * | ||
| 3591 | JEMALLOC_ALLOC_SIZE(2) | ||
| 3592 | je_rallocx(void *ptr, size_t size, int flags) { | ||
| 3593 | LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, | ||
| 3594 | size, flags); | ||
| 3595 | void *ret = do_rallocx(ptr, size, flags, false, NULL, NULL); | ||
| 3596 | LOG("core.rallocx.exit", "result: %p", ret); | ||
| 3597 | return ret; | ||
| 3598 | } | ||
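| | |||
| | /* | ||
| | * Example: growing an aligned buffer. rallocx() honors the requested | ||
| | * alignment across a move, and with MALLOCX_ZERO the newly usable | ||
| | * bytes come back zeroed (otherwise, with junk filling enabled, they | ||
| | * go through junk_alloc_callback() as above): | ||
| | * | ||
| | *   p = rallocx(p, new_size, MALLOCX_ALIGN(64) | MALLOCX_ZERO); | ||
| | */ | ||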
| 3599 | |||
| 3600 | static void * | ||
| 3601 | do_realloc_nonnull_zero(void *ptr, size_t *old_usize, size_t *new_usize) { | ||
| 3602 | if (config_stats) { | ||
| 3603 | atomic_fetch_add_zu(&zero_realloc_count, 1, ATOMIC_RELAXED); | ||
| 3604 | } | ||
| 3605 | if (opt_zero_realloc_action == zero_realloc_action_alloc) { | ||
| 3606 | /* | ||
| 3607 | * The user might have gotten an alloc setting while expecting a | ||
| 3608 | * free setting. If that's the case, we at least try to | ||
| 3609 | * reduce the harm, and turn off the tcache while allocating, so | ||
| 3610 | * that we'll get a true first fit. | ||
| 3611 | */ | ||
| 3612 | return do_rallocx(ptr, 1, MALLOCX_TCACHE_NONE, true, old_usize, new_usize); | ||
| 3613 | } else if (opt_zero_realloc_action == zero_realloc_action_free) { | ||
| 3614 | UTRACE(ptr, 0, 0); | ||
| 3615 | tsd_t *tsd = tsd_fetch(); | ||
| 3616 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 3617 | |||
| 3618 | tcache_t *tcache = tcache_get_from_ind(tsd, | ||
| 3619 | TCACHE_IND_AUTOMATIC, /* slow */ true, | ||
| 3620 | /* is_alloc */ false); | ||
| 3621 | uintptr_t args[3] = {(uintptr_t)ptr, 0}; | ||
| 3622 | hook_invoke_dalloc(hook_dalloc_realloc, ptr, args); | ||
| 3623 | size_t usize; | ||
| 3624 | ifree(tsd, ptr, tcache, true, &usize); | ||
| 3625 | if (old_usize) *old_usize = usize; | ||
| 3626 | if (new_usize) *new_usize = 0; | ||
| 3627 | |||
| 3628 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 3629 | return NULL; | ||
| 3630 | } else { | ||
| 3631 | safety_check_fail("Called realloc(non-null-ptr, 0) with " | ||
| 3632 | "zero_realloc:abort set\n"); | ||
| 3633 | /* | ||
| 3634 | * In real code, this will never run; the safety check failure will | ||
| 3635 | * call abort. In the unit test, we just want to bail out without | ||
| 3636 | * corrupting internal state that the test needs to finish. | ||
| 3637 | */ | ||
| 3638 | return NULL; | ||
| 3639 | } | ||
| 3640 | } | ||
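| | |||
| | /* | ||
| | * The action above is selected at run time; assuming the standard | ||
| | * MALLOC_CONF spelling of the option, the three behaviors are: | ||
| | * | ||
| | *   zero_realloc:alloc   realloc(p, 0) allocates a minimal object | ||
| | *                        (with the tcache bypassed, as above). | ||
| | *   zero_realloc:free    realloc(p, 0) frees p and returns NULL. | ||
| | *   zero_realloc:abort   realloc(p, 0) trips the safety check | ||
| | *                        failure above. | ||
| | */ | ||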
| 3641 | |||
| 3642 | static inline void *je_realloc_internal(void *ptr, size_t size, size_t *old_usize, size_t *new_usize) { | ||
| 3643 | LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); | ||
| 3644 | |||
| 3645 | if (likely(ptr != NULL && size != 0)) { | ||
| 3646 | void *ret = do_rallocx(ptr, size, 0, true, old_usize, new_usize); | ||
| 3647 | LOG("core.realloc.exit", "result: %p", ret); | ||
| 3648 | return ret; | ||
| 3649 | } else if (ptr != NULL && size == 0) { | ||
| 3650 | void *ret = do_realloc_nonnull_zero(ptr, old_usize, new_usize); | ||
| 3651 | LOG("core.realloc.exit", "result: %p", ret); | ||
| 3652 | return ret; | ||
| 3653 | } else { | ||
| 3654 | /* realloc(NULL, size) is equivalent to malloc(size). */ | ||
| 3655 | void *ret; | ||
| 3656 | |||
| 3657 | static_opts_t sopts; | ||
| 3658 | dynamic_opts_t dopts; | ||
| 3659 | |||
| 3660 | static_opts_init(&sopts); | ||
| 3661 | dynamic_opts_init(&dopts); | ||
| 3662 | |||
| 3663 | sopts.null_out_result_on_error = true; | ||
| 3664 | sopts.set_errno_on_error = true; | ||
| 3665 | sopts.oom_string = | ||
| 3666 | "<jemalloc>: Error in realloc(): out of memory\n"; | ||
| 3667 | |||
| 3668 | dopts.result = &ret; | ||
| 3669 | dopts.num_items = 1; | ||
| 3670 | dopts.item_size = size; | ||
| 3671 | |||
| 3672 | imalloc(&sopts, &dopts); | ||
| 3673 | if (sopts.slow) { | ||
| 3674 | uintptr_t args[3] = {(uintptr_t)ptr, size}; | ||
| 3675 | hook_invoke_alloc(hook_alloc_realloc, ret, | ||
| 3676 | (uintptr_t)ret, args); | ||
| 3677 | } | ||
| 3678 | LOG("core.realloc.exit", "result: %p", ret); | ||
| 3679 | if (old_usize) *old_usize = 0; | ||
| 3680 | if (new_usize) *new_usize = dopts.usize; | ||
| 3681 | return ret; | ||
| 3682 | } | ||
| 3683 | } | ||
| 3684 | |||
| 3685 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 3686 | void JEMALLOC_NOTHROW * | ||
| 3687 | JEMALLOC_ALLOC_SIZE(2) | ||
| 3688 | je_realloc(void *ptr, size_t size) { | ||
| 3689 | return je_realloc_internal(ptr, size, NULL, NULL); | ||
| 3690 | } | ||
| 3691 | |||
| 3692 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 3693 | ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, | ||
| 3694 | size_t extra, size_t alignment, bool zero) { | ||
| 3695 | size_t newsize; | ||
| 3696 | |||
| 3697 | if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero, | ||
| 3698 | &newsize)) { | ||
| 3699 | return old_usize; | ||
| 3700 | } | ||
| 3701 | |||
| 3702 | return newsize; | ||
| 3703 | } | ||
| 3704 | |||
| 3705 | static size_t | ||
| 3706 | ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, | ||
| 3707 | size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { | ||
| 3708 | /* Sampled allocation needs to be page aligned. */ | ||
| 3709 | if (tctx == NULL || !prof_sample_aligned(ptr)) { | ||
| 3710 | return old_usize; | ||
| 3711 | } | ||
| 3712 | |||
| 3713 | return ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, | ||
| 3714 | zero); | ||
| 3715 | } | ||
| 3716 | |||
| 3717 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 3718 | ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, | ||
| 3719 | size_t extra, size_t alignment, bool zero, emap_alloc_ctx_t *alloc_ctx) { | ||
| 3720 | /* | ||
| 3721 | * old_prof_info is only used for asserting that the profiling info | ||
| 3722 | * isn't changed by the ixalloc() call. | ||
| 3723 | */ | ||
| 3724 | prof_info_t old_prof_info; | ||
| 3725 | prof_info_get(tsd, ptr, alloc_ctx, &old_prof_info); | ||
| 3726 | |||
| 3727 | /* | ||
| 3728 | * usize isn't knowable before ixalloc() returns when extra is non-zero. | ||
| 3729 | * Therefore, compute its maximum possible value and use that in | ||
| 3730 | * prof_alloc_prep() to decide whether to capture a backtrace. | ||
| 3731 | * prof_realloc() will use the actual usize to decide whether to sample. | ||
| 3732 | */ | ||
| 3733 | size_t usize_max; | ||
| 3734 | if (aligned_usize_get(size + extra, alignment, &usize_max, NULL, | ||
| 3735 | false)) { | ||
| 3736 | /* | ||
| 3737 | * usize_max is out of range, and chances are that allocation | ||
| 3738 | * will fail, but use the maximum possible value and carry on | ||
| 3739 | * with prof_alloc_prep(), just in case allocation succeeds. | ||
| 3740 | */ | ||
| 3741 | usize_max = SC_LARGE_MAXCLASS; | ||
| 3742 | } | ||
| 3743 | bool prof_active = prof_active_get_unlocked(); | ||
| 3744 | bool sample_event = te_prof_sample_event_lookahead(tsd, usize_max); | ||
| 3745 | prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event); | ||
| 3746 | |||
| 3747 | size_t usize; | ||
| 3748 | if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { | ||
| 3749 | usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, | ||
| 3750 | size, extra, alignment, zero, tctx); | ||
| 3751 | } else { | ||
| 3752 | usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, | ||
| 3753 | extra, alignment, zero); | ||
| 3754 | } | ||
| 3755 | |||
| 3756 | /* | ||
| 3757 | * At this point we can still safely get the original profiling | ||
| 3758 | * information associated with the ptr, because (a) the edata_t object | ||
| 3759 | * associated with the ptr still lives and (b) the profiling info | ||
| 3760 | * fields are not touched. "(a)" is asserted in the outer je_xallocx() | ||
| 3761 | * function, and "(b)" is indirectly verified below by checking that | ||
| 3762 | * the alloc_tctx field is unchanged. | ||
| 3763 | */ | ||
| 3764 | prof_info_t prof_info; | ||
| 3765 | if (usize == old_usize) { | ||
| 3766 | prof_info_get(tsd, ptr, alloc_ctx, &prof_info); | ||
| 3767 | prof_alloc_rollback(tsd, tctx); | ||
| 3768 | } else { | ||
| 3769 | prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info); | ||
| 3770 | assert(usize <= usize_max); | ||
| 3771 | sample_event = te_prof_sample_event_lookahead(tsd, usize); | ||
| 3772 | prof_realloc(tsd, ptr, size, usize, tctx, prof_active, ptr, | ||
| 3773 | old_usize, &prof_info, sample_event); | ||
| 3774 | } | ||
| 3775 | |||
| 3776 | assert(old_prof_info.alloc_tctx == prof_info.alloc_tctx); | ||
| 3777 | return usize; | ||
| 3778 | } | ||
| 3779 | |||
| 3780 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW | ||
| 3781 | je_xallocx(void *ptr, size_t size, size_t extra, int flags) { | ||
| 3782 | tsd_t *tsd; | ||
| 3783 | size_t usize, old_usize; | ||
| 3784 | size_t alignment = MALLOCX_ALIGN_GET(flags); | ||
| 3785 | bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true); | ||
| 3786 | |||
| 3787 | LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, " | ||
| 3788 | "flags: %d", ptr, size, extra, flags); | ||
| 3789 | |||
| 3790 | assert(ptr != NULL); | ||
| 3791 | assert(size != 0); | ||
| 3792 | assert(SIZE_T_MAX - size >= extra); | ||
| 3793 | assert(malloc_initialized() || IS_INITIALIZER); | ||
| 3794 | tsd = tsd_fetch(); | ||
| 3795 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 3796 | |||
| 3797 | /* | ||
| 3798 | * old_edata is only for verifying that xallocx() keeps the edata_t | ||
| 3799 | * object associated with the ptr (though the content of the edata_t | ||
| 3800 | * object can be changed). | ||
| 3801 | */ | ||
| 3802 | edata_t *old_edata = emap_edata_lookup(tsd_tsdn(tsd), | ||
| 3803 | &arena_emap_global, ptr); | ||
| 3804 | |||
| 3805 | emap_alloc_ctx_t alloc_ctx; | ||
| 3806 | emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, | ||
| 3807 | &alloc_ctx); | ||
| 3808 | assert(alloc_ctx.szind != SC_NSIZES); | ||
| 3809 | old_usize = sz_index2size(alloc_ctx.szind); | ||
| 3810 | assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); | ||
| 3811 | /* | ||
| 3812 | * The API explicitly absolves itself of protecting against (size + | ||
| 3813 | * extra) numerical overflow, but we may need to clamp extra to avoid | ||
| 3814 | * exceeding SC_LARGE_MAXCLASS. | ||
| 3815 | * | ||
| 3816 | * Ordinarily, size limit checking is handled deeper down, but here we | ||
| 3817 | * have to check as part of (size + extra) clamping, since we need the | ||
| 3818 | * clamped value in the above helper functions. | ||
| 3819 | */ | ||
| 3820 | if (unlikely(size > SC_LARGE_MAXCLASS)) { | ||
| 3821 | usize = old_usize; | ||
| 3822 | goto label_not_resized; | ||
| 3823 | } | ||
| 3824 | if (unlikely(SC_LARGE_MAXCLASS - size < extra)) { | ||
| 3825 | extra = SC_LARGE_MAXCLASS - size; | ||
| 3826 | } | ||
| 3827 | |||
| 3828 | if (config_prof && opt_prof) { | ||
| 3829 | usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, | ||
| 3830 | alignment, zero, &alloc_ctx); | ||
| 3831 | } else { | ||
| 3832 | usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, | ||
| 3833 | extra, alignment, zero); | ||
| 3834 | } | ||
| 3835 | |||
| 3836 | /* | ||
| 3837 | * xallocx() should keep using the same edata_t object (though its | ||
| 3838 | * content can be changed). | ||
| 3839 | */ | ||
| 3840 | assert(emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr) | ||
| 3841 | == old_edata); | ||
| 3842 | |||
| 3843 | if (unlikely(usize == old_usize)) { | ||
| 3844 | goto label_not_resized; | ||
| 3845 | } | ||
| 3846 | thread_alloc_event(tsd, usize); | ||
| 3847 | thread_dalloc_event(tsd, old_usize); | ||
| 3848 | |||
| 3849 | if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize && | ||
| 3850 | !zero) { | ||
| 3851 | size_t excess_len = usize - old_usize; | ||
| 3852 | void *excess_start = (void *)((uintptr_t)ptr + old_usize); | ||
| 3853 | junk_alloc_callback(excess_start, excess_len); | ||
| 3854 | } | ||
| 3855 | label_not_resized: | ||
| 3856 | if (unlikely(!tsd_fast(tsd))) { | ||
| 3857 | uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags}; | ||
| 3858 | hook_invoke_expand(hook_expand_xallocx, ptr, old_usize, | ||
| 3859 | usize, (uintptr_t)usize, args); | ||
| 3860 | } | ||
| 3861 | |||
| 3862 | UTRACE(ptr, size, ptr); | ||
| 3863 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 3864 | |||
| 3865 | LOG("core.xallocx.exit", "result: %zu", usize); | ||
| 3866 | return usize; | ||
| 3867 | } | ||
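| | |||
| | /* | ||
| | * Example: xallocx() returns the resulting usable size whether or not | ||
| | * the resize succeeded, so in-place growth with a copying fallback | ||
| | * looks like: | ||
| | * | ||
| | *   if (xallocx(p, want, 0, 0) < want) { | ||
| | *           p = rallocx(p, want, 0);   (may move; NULL on OOM) | ||
| | *   } | ||
| | */ | ||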
| 3868 | |||
| 3869 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW | ||
| 3870 | JEMALLOC_ATTR(pure) | ||
| 3871 | je_sallocx(const void *ptr, int flags) { | ||
| 3872 | size_t usize; | ||
| 3873 | tsdn_t *tsdn; | ||
| 3874 | |||
| 3875 | LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags); | ||
| 3876 | |||
| 3877 | assert(malloc_initialized() || IS_INITIALIZER); | ||
| 3878 | assert(ptr != NULL); | ||
| 3879 | |||
| 3880 | tsdn = tsdn_fetch(); | ||
| 3881 | check_entry_exit_locking(tsdn); | ||
| 3882 | |||
| 3883 | if (config_debug || force_ivsalloc) { | ||
| 3884 | usize = ivsalloc(tsdn, ptr); | ||
| 3885 | assert(force_ivsalloc || usize != 0); | ||
| 3886 | } else { | ||
| 3887 | usize = isalloc(tsdn, ptr); | ||
| 3888 | } | ||
| 3889 | |||
| 3890 | check_entry_exit_locking(tsdn); | ||
| 3891 | |||
| 3892 | LOG("core.sallocx.exit", "result: %zu", usize); | ||
| 3893 | return usize; | ||
| 3894 | } | ||
| 3895 | |||
| 3896 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW | ||
| 3897 | je_dallocx(void *ptr, int flags) { | ||
| 3898 | LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags); | ||
| 3899 | |||
| 3900 | assert(ptr != NULL); | ||
| 3901 | assert(malloc_initialized() || IS_INITIALIZER); | ||
| 3902 | |||
| 3903 | tsd_t *tsd = tsd_fetch_min(); | ||
| 3904 | bool fast = tsd_fast(tsd); | ||
| 3905 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 3906 | |||
| 3907 | unsigned tcache_ind = mallocx_tcache_get(flags); | ||
| 3908 | tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast, | ||
| 3909 | /* is_alloc */ false); | ||
| 3910 | |||
| 3911 | UTRACE(ptr, 0, 0); | ||
| 3912 | if (likely(fast)) { | ||
| 3913 | tsd_assert_fast(tsd); | ||
| 3914 | ifree(tsd, ptr, tcache, false, NULL); | ||
| 3915 | } else { | ||
| 3916 | uintptr_t args_raw[3] = {(uintptr_t)ptr, flags}; | ||
| 3917 | hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw); | ||
| 3918 | ifree(tsd, ptr, tcache, true, NULL); | ||
| 3919 | } | ||
| 3920 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 3921 | |||
| 3922 | LOG("core.dallocx.exit", ""); | ||
| 3923 | } | ||
| 3924 | |||
| 3925 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 3926 | inallocx(tsdn_t *tsdn, size_t size, int flags) { | ||
| 3927 | check_entry_exit_locking(tsdn); | ||
| 3928 | size_t usize; | ||
| 3929 | /* If the size is out of range, let the user see the resulting usize rather than failing here. */ | ||
| 3930 | aligned_usize_get(size, MALLOCX_ALIGN_GET(flags), &usize, NULL, false); | ||
| 3931 | check_entry_exit_locking(tsdn); | ||
| 3932 | return usize; | ||
| 3933 | } | ||
| 3934 | |||
| 3935 | JEMALLOC_NOINLINE void | ||
| 3936 | sdallocx_default(void *ptr, size_t size, int flags) { | ||
| 3937 | assert(ptr != NULL); | ||
| 3938 | assert(malloc_initialized() || IS_INITIALIZER); | ||
| 3939 | |||
| 3940 | tsd_t *tsd = tsd_fetch_min(); | ||
| 3941 | bool fast = tsd_fast(tsd); | ||
| 3942 | size_t usize = inallocx(tsd_tsdn(tsd), size, flags); | ||
| 3943 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 3944 | |||
| 3945 | unsigned tcache_ind = mallocx_tcache_get(flags); | ||
| 3946 | tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast, | ||
| 3947 | /* is_alloc */ false); | ||
| 3948 | |||
| 3949 | UTRACE(ptr, 0, 0); | ||
| 3950 | if (likely(fast)) { | ||
| 3951 | tsd_assert_fast(tsd); | ||
| 3952 | isfree(tsd, ptr, usize, tcache, false); | ||
| 3953 | } else { | ||
| 3954 | uintptr_t args_raw[3] = {(uintptr_t)ptr, size, flags}; | ||
| 3955 | hook_invoke_dalloc(hook_dalloc_sdallocx, ptr, args_raw); | ||
| 3956 | isfree(tsd, ptr, usize, tcache, true); | ||
| 3957 | } | ||
| 3958 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 3959 | } | ||
| 3960 | |||
| 3961 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW | ||
| 3962 | je_sdallocx(void *ptr, size_t size, int flags) { | ||
| 3963 | LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, | ||
| 3964 | size, flags); | ||
| 3965 | |||
| 3966 | if (flags != 0 || !free_fastpath(ptr, size, true, NULL)) { | ||
| 3967 | sdallocx_default(ptr, size, flags); | ||
| 3968 | } | ||
| 3969 | |||
| 3970 | LOG("core.sdallocx.exit", ""); | ||
| 3971 | } | ||
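| | |||
| | /* | ||
| | * Example: the size-hinted fast path above expects the original | ||
| | * request size (or a value from nallocx()/sallocx()): | ||
| | * | ||
| | *   void *p = mallocx(48, 0); | ||
| | *   ... | ||
| | *   sdallocx(p, 48, 0); | ||
| | */ | ||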
| 3972 | |||
| 3973 | void JEMALLOC_NOTHROW | ||
| 3974 | je_sdallocx_noflags(void *ptr, size_t size) { | ||
| 3975 | LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr, | ||
| 3976 | size); | ||
| 3977 | |||
| 3978 | if (!free_fastpath(ptr, size, true, NULL)) { | ||
| 3979 | sdallocx_default(ptr, size, 0); | ||
| 3980 | } | ||
| 3981 | |||
| 3982 | LOG("core.sdallocx.exit", ""); | ||
| 3983 | } | ||
| 3984 | |||
| 3985 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW | ||
| 3986 | JEMALLOC_ATTR(pure) | ||
| 3987 | je_nallocx(size_t size, int flags) { | ||
| 3988 | size_t usize; | ||
| 3989 | tsdn_t *tsdn; | ||
| 3990 | |||
| 3991 | assert(size != 0); | ||
| 3992 | |||
| 3993 | if (unlikely(malloc_init())) { | ||
| 3994 | LOG("core.nallocx.exit", "result: %zu", ZU(0)); | ||
| 3995 | return 0; | ||
| 3996 | } | ||
| 3997 | |||
| 3998 | tsdn = tsdn_fetch(); | ||
| 3999 | check_entry_exit_locking(tsdn); | ||
| 4000 | |||
| 4001 | usize = inallocx(tsdn, size, flags); | ||
| 4002 | if (unlikely(usize > SC_LARGE_MAXCLASS)) { | ||
| 4003 | LOG("core.nallocx.exit", "result: %zu", ZU(0)); | ||
| 4004 | return 0; | ||
| 4005 | } | ||
| 4006 | |||
| 4007 | check_entry_exit_locking(tsdn); | ||
| 4008 | LOG("core.nallocx.exit", "result: %zu", usize); | ||
| 4009 | return usize; | ||
| 4010 | } | ||
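| | |||
| | /* | ||
| | * Example: nallocx() reports the usable size a request would round up | ||
| | * to, without allocating, letting callers size buffers to the real | ||
| | * size class (the exact value is configuration-dependent): | ||
| | * | ||
| | *   size_t usable = nallocx(100, 0);   e.g. 112 with default classes | ||
| | *   void *p = mallocx(usable, 0); | ||
| | */ | ||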
| 4011 | |||
| 4012 | JEMALLOC_EXPORT int JEMALLOC_NOTHROW | ||
| 4013 | je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, | ||
| 4014 | size_t newlen) { | ||
| 4015 | int ret; | ||
| 4016 | tsd_t *tsd; | ||
| 4017 | |||
| 4018 | LOG("core.mallctl.entry", "name: %s", name); | ||
| 4019 | |||
| 4020 | if (unlikely(malloc_init())) { | ||
| 4021 | LOG("core.mallctl.exit", "result: %d", EAGAIN); | ||
| 4022 | return EAGAIN; | ||
| 4023 | } | ||
| 4024 | |||
| 4025 | tsd = tsd_fetch(); | ||
| 4026 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 4027 | ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); | ||
| 4028 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 4029 | |||
| 4030 | LOG("core.mallctl.exit", "result: %d", ret); | ||
| 4031 | return ret; | ||
| 4032 | } | ||
| 4033 | |||
| 4034 | JEMALLOC_EXPORT int JEMALLOC_NOTHROW | ||
| 4035 | je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { | ||
| 4036 | int ret; | ||
| 4037 | |||
| 4038 | LOG("core.mallctlnametomib.entry", "name: %s", name); | ||
| 4039 | |||
| 4040 | if (unlikely(malloc_init())) { | ||
| 4041 | LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN); | ||
| 4042 | return EAGAIN; | ||
| 4043 | } | ||
| 4044 | |||
| 4045 | tsd_t *tsd = tsd_fetch(); | ||
| 4046 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 4047 | ret = ctl_nametomib(tsd, name, mibp, miblenp); | ||
| 4048 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 4049 | |||
| 4050 | LOG("core.mallctlnametomib.exit", "result: %d", ret); | ||
| 4051 | return ret; | ||
| 4052 | } | ||
| 4053 | |||
| 4054 | JEMALLOC_EXPORT int JEMALLOC_NOTHROW | ||
| 4055 | je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, | ||
| 4056 | void *newp, size_t newlen) { | ||
| 4057 | int ret; | ||
| 4058 | tsd_t *tsd; | ||
| 4059 | |||
| 4060 | LOG("core.mallctlbymib.entry", ""); | ||
| 4061 | |||
| 4062 | if (unlikely(malloc_init())) { | ||
| 4063 | LOG("core.mallctlbymib.exit", "result: %d", EAGAIN); | ||
| 4064 | return EAGAIN; | ||
| 4065 | } | ||
| 4066 | |||
| 4067 | tsd = tsd_fetch(); | ||
| 4068 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 4069 | ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); | ||
| 4070 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 4071 | LOG("core.mallctlbymib.exit", "result: %d", ret); | ||
| 4072 | return ret; | ||
| 4073 | } | ||
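| | |||
| | /* | ||
| | * Example: the intended pattern for repeated queries is to translate | ||
| | * the name once, then go through the MIB in the hot path: | ||
| | * | ||
| | *   size_t mib[2], miblen = 2; | ||
| | *   mallctlnametomib("stats.allocated", mib, &miblen); | ||
| | *   ... | ||
| | *   uint64_t epoch = 1; | ||
| | *   size_t sz = sizeof(epoch); | ||
| | *   mallctl("epoch", &epoch, &sz, &epoch, sz);   (refresh stats) | ||
| | *   size_t allocated, len = sizeof(allocated); | ||
| | *   mallctlbymib(mib, miblen, &allocated, &len, NULL, 0); | ||
| | */ | ||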
| 4074 | |||
| 4075 | #define STATS_PRINT_BUFSIZE 65536 | ||
| 4076 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW | ||
| 4077 | je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, | ||
| 4078 | const char *opts) { | ||
| 4079 | tsdn_t *tsdn; | ||
| 4080 | |||
| 4081 | LOG("core.malloc_stats_print.entry", ""); | ||
| 4082 | |||
| 4083 | tsdn = tsdn_fetch(); | ||
| 4084 | check_entry_exit_locking(tsdn); | ||
| 4085 | |||
| 4086 | if (config_debug) { | ||
| 4087 | stats_print(write_cb, cbopaque, opts); | ||
| 4088 | } else { | ||
| 4089 | buf_writer_t buf_writer; | ||
| 4090 | buf_writer_init(tsdn, &buf_writer, write_cb, cbopaque, NULL, | ||
| 4091 | STATS_PRINT_BUFSIZE); | ||
| 4092 | stats_print(buf_writer_cb, &buf_writer, opts); | ||
| 4093 | buf_writer_terminate(tsdn, &buf_writer); | ||
| 4094 | } | ||
| 4095 | |||
| 4096 | check_entry_exit_locking(tsdn); | ||
| 4097 | LOG("core.malloc_stats_print.exit", ""); | ||
| 4098 | } | ||
| 4099 | #undef STATS_PRINT_BUFSIZE | ||
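| | |||
| | /* | ||
| | * Example: passing a NULL write callback routes output through | ||
| | * malloc_message (stderr by default); the opts string filters the | ||
| | * report, e.g. "J" selects JSON: | ||
| | * | ||
| | *   malloc_stats_print(NULL, NULL, "J"); | ||
| | */ | ||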
| 4100 | |||
| 4101 | JEMALLOC_ALWAYS_INLINE size_t | ||
| 4102 | je_malloc_usable_size_impl(JEMALLOC_USABLE_SIZE_CONST void *ptr) { | ||
| 4103 | assert(malloc_initialized() || IS_INITIALIZER); | ||
| 4104 | |||
| 4105 | tsdn_t *tsdn = tsdn_fetch(); | ||
| 4106 | check_entry_exit_locking(tsdn); | ||
| 4107 | |||
| 4108 | size_t ret; | ||
| 4109 | if (unlikely(ptr == NULL)) { | ||
| 4110 | ret = 0; | ||
| 4111 | } else { | ||
| 4112 | if (config_debug || force_ivsalloc) { | ||
| 4113 | ret = ivsalloc(tsdn, ptr); | ||
| 4114 | assert(force_ivsalloc || ret != 0); | ||
| 4115 | } else { | ||
| 4116 | ret = isalloc(tsdn, ptr); | ||
| 4117 | } | ||
| 4118 | } | ||
| 4119 | check_entry_exit_locking(tsdn); | ||
| 4120 | |||
| 4121 | return ret; | ||
| 4122 | } | ||
| 4123 | |||
| 4124 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW | ||
| 4125 | je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { | ||
| 4126 | LOG("core.malloc_usable_size.entry", "ptr: %p", ptr); | ||
| 4127 | |||
| 4128 | size_t ret = je_malloc_usable_size_impl(ptr); | ||
| 4129 | |||
| 4130 | LOG("core.malloc_usable_size.exit", "result: %zu", ret); | ||
| 4131 | return ret; | ||
| 4132 | } | ||
| 4133 | |||
| 4134 | #ifdef JEMALLOC_HAVE_MALLOC_SIZE | ||
| 4135 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW | ||
| 4136 | je_malloc_size(const void *ptr) { | ||
| 4137 | LOG("core.malloc_size.entry", "ptr: %p", ptr); | ||
| 4138 | |||
| 4139 | size_t ret = je_malloc_usable_size_impl(ptr); | ||
| 4140 | |||
| 4141 | LOG("core.malloc_size.exit", "result: %zu", ret); | ||
| 4142 | return ret; | ||
| 4143 | } | ||
| 4144 | #endif | ||
| 4145 | |||
| 4146 | static void | ||
| 4147 | batch_alloc_prof_sample_assert(tsd_t *tsd, size_t batch, size_t usize) { | ||
| 4148 | assert(config_prof && opt_prof); | ||
| 4149 | bool prof_sample_event = te_prof_sample_event_lookahead(tsd, | ||
| 4150 | batch * usize); | ||
| 4151 | assert(!prof_sample_event); | ||
| 4152 | size_t surplus; | ||
| 4153 | prof_sample_event = te_prof_sample_event_lookahead_surplus(tsd, | ||
| 4154 | (batch + 1) * usize, &surplus); | ||
| 4155 | assert(prof_sample_event); | ||
| 4156 | assert(surplus < usize); | ||
| 4157 | } | ||
| 4158 | |||
| 4159 | size_t | ||
| 4160 | batch_alloc(void **ptrs, size_t num, size_t size, int flags) { | ||
| 4161 | LOG("core.batch_alloc.entry", | ||
| 4162 | "ptrs: %p, num: %zu, size: %zu, flags: %d", ptrs, num, size, flags); | ||
| 4163 | |||
| 4164 | tsd_t *tsd = tsd_fetch(); | ||
| 4165 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 4166 | |||
| 4167 | size_t filled = 0; | ||
| 4168 | |||
| 4169 | if (unlikely(tsd == NULL || tsd_reentrancy_level_get(tsd) > 0)) { | ||
| 4170 | goto label_done; | ||
| 4171 | } | ||
| 4172 | |||
| 4173 | size_t alignment = MALLOCX_ALIGN_GET(flags); | ||
| 4174 | size_t usize; | ||
| 4175 | if (aligned_usize_get(size, alignment, &usize, NULL, false)) { | ||
| 4176 | goto label_done; | ||
| 4177 | } | ||
| 4178 | szind_t ind = sz_size2index(usize); | ||
| 4179 | bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true); | ||
| 4180 | |||
| 4181 | /* | ||
| 4182 | * The cache bin and arena will be lazily initialized; it's hard to | ||
| 4183 | * know in advance whether each of them needs to be initialized. | ||
| 4184 | */ | ||
| 4185 | cache_bin_t *bin = NULL; | ||
| 4186 | arena_t *arena = NULL; | ||
| 4187 | |||
| 4188 | size_t nregs = 0; | ||
| 4189 | if (likely(ind < SC_NBINS)) { | ||
| 4190 | nregs = bin_infos[ind].nregs; | ||
| 4191 | assert(nregs > 0); | ||
| 4192 | } | ||
| 4193 | |||
| 4194 | while (filled < num) { | ||
| 4195 | size_t batch = num - filled; | ||
| 4196 | size_t surplus = SIZE_MAX; /* Dead store. */ | ||
| 4197 | bool prof_sample_event = config_prof && opt_prof | ||
| 4198 | && prof_active_get_unlocked() | ||
| 4199 | && te_prof_sample_event_lookahead_surplus(tsd, | ||
| 4200 | batch * usize, &surplus); | ||
| 4201 | |||
| 4202 | if (prof_sample_event) { | ||
| 4203 | /* | ||
| 4204 | * Adjust so that the batch does not trigger prof | ||
| 4205 | * sampling. | ||
| 4206 | */ | ||
| 4207 | batch -= surplus / usize + 1; | ||
| 4208 | batch_alloc_prof_sample_assert(tsd, batch, usize); | ||
| 4209 | } | ||
| 4210 | |||
| 4211 | size_t progress = 0; | ||
| 4212 | |||
| 4213 | if (likely(ind < SC_NBINS) && batch >= nregs) { | ||
| 4214 | if (arena == NULL) { | ||
| 4215 | unsigned arena_ind = mallocx_arena_get(flags); | ||
| 4216 | if (arena_get_from_ind(tsd, arena_ind, | ||
| 4217 | &arena)) { | ||
| 4218 | goto label_done; | ||
| 4219 | } | ||
| 4220 | if (arena == NULL) { | ||
| 4221 | arena = arena_choose(tsd, NULL); | ||
| 4222 | } | ||
| 4223 | if (unlikely(arena == NULL)) { | ||
| 4224 | goto label_done; | ||
| 4225 | } | ||
| 4226 | } | ||
| 4227 | size_t arena_batch = batch - batch % nregs; | ||
| 4228 | size_t n = arena_fill_small_fresh(tsd_tsdn(tsd), arena, | ||
| 4229 | ind, ptrs + filled, arena_batch, zero); | ||
| 4230 | progress += n; | ||
| 4231 | filled += n; | ||
| 4232 | } | ||
| 4233 | |||
| 4234 | if (likely(ind < nhbins) && progress < batch) { | ||
| 4235 | if (bin == NULL) { | ||
| 4236 | unsigned tcache_ind = mallocx_tcache_get(flags); | ||
| 4237 | tcache_t *tcache = tcache_get_from_ind(tsd, | ||
| 4238 | tcache_ind, /* slow */ true, | ||
| 4239 | /* is_alloc */ true); | ||
| 4240 | if (tcache != NULL) { | ||
| 4241 | bin = &tcache->bins[ind]; | ||
| 4242 | } | ||
| 4243 | } | ||
| 4244 | /* | ||
| 4245 | * If we don't have a tcache bin, we don't want to | ||
| 4246 | * immediately give up, because there's the possibility | ||
| 4247 | * that the user explicitly requested to bypass the | ||
| 4248 | * tcache, or that the user explicitly turned off the | ||
| 4249 | * tcache; in such cases, we go through the slow path, | ||
| 4250 | * i.e. the mallocx() call at the end of the while loop. | ||
| 4251 | */ | ||
| 4252 | if (bin != NULL) { | ||
| 4253 | size_t bin_batch = batch - progress; | ||
| 4254 | /* | ||
| 4255 | * n can be less than bin_batch, meaning that | ||
| 4256 | * the cache bin does not have enough memory. | ||
| 4257 | * In such cases, we rely on the slow path, | ||
| 4258 | * i.e. the mallocx() call at the end of the | ||
| 4259 | * while loop, to fill in the cache, and in the | ||
| 4260 | * next iteration of the while loop, the tcache | ||
| 4261 | * will contain plenty of cached objects, which | ||
| 4262 | * we can harvest here. Compared to the | ||
| 4263 | * alternative approach where we directly go to | ||
| 4264 | * the arena bins here, the overhead of our | ||
| 4265 | * current approach should usually be minimal, | ||
| 4266 | * since we never try to fetch more memory than | ||
| 4267 | * what a slab contains via the tcache. An | ||
| 4268 | * additional benefit is that the tcache will | ||
| 4269 | * not be empty for the next allocation request. | ||
| 4270 | */ | ||
| 4271 | size_t n = cache_bin_alloc_batch(bin, bin_batch, | ||
| 4272 | ptrs + filled); | ||
| 4273 | if (config_stats) { | ||
| 4274 | bin->tstats.nrequests += n; | ||
| 4275 | } | ||
| 4276 | if (zero) { | ||
| 4277 | for (size_t i = 0; i < n; ++i) { | ||
| 4278 | memset(ptrs[filled + i], 0, | ||
| 4279 | usize); | ||
| 4280 | } | ||
| 4281 | } | ||
| 4282 | if (config_prof && opt_prof | ||
| 4283 | && unlikely(ind >= SC_NBINS)) { | ||
| 4284 | for (size_t i = 0; i < n; ++i) { | ||
| 4285 | prof_tctx_reset_sampled(tsd, | ||
| 4286 | ptrs[filled + i]); | ||
| 4287 | } | ||
| 4288 | } | ||
| 4289 | progress += n; | ||
| 4290 | filled += n; | ||
| 4291 | } | ||
| 4292 | } | ||
| 4293 | |||
| 4294 | /* | ||
| 4295 | * For thread events other than prof sampling, trigger them as | ||
| 4296 | * if there's a single allocation of size (n * usize). This is | ||
| 4297 | * fine because: | ||
| 4298 | * (a) these events do not alter the allocation itself, and | ||
| 4299 | * (b) it's possible that some event would have been triggered | ||
| 4300 | * multiple times, instead of only once, if the allocations | ||
| 4301 | * were handled individually, but it would do no harm (or | ||
| 4302 | * even be beneficial) to coalesce the triggers. | ||
| 4303 | */ | ||
| 4304 | thread_alloc_event(tsd, progress * usize); | ||
| 4305 | |||
| 4306 | if (progress < batch || prof_sample_event) { | ||
| 4307 | void *p = je_mallocx(size, flags); | ||
| 4308 | if (p == NULL) { /* OOM */ | ||
| 4309 | break; | ||
| 4310 | } | ||
| 4311 | if (progress == batch) { | ||
| 4312 | assert(prof_sampled(tsd, p)); | ||
| 4313 | } | ||
| 4314 | ptrs[filled++] = p; | ||
| 4315 | } | ||
| 4316 | } | ||
| 4317 | |||
| 4318 | label_done: | ||
| 4319 | check_entry_exit_locking(tsd_tsdn(tsd)); | ||
| 4320 | LOG("core.batch_alloc.exit", "result: %zu", filled); | ||
| 4321 | return filled; | ||
| 4322 | } | ||
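| | |||
| | /* | ||
| | * A minimal sketch of driving the batch interface above (assuming the | ||
| | * symbol is reachable under this name; it is not part of the stable | ||
| | * public API). The return value may be less than num on OOM, and only | ||
| | * ptrs[0..n) are valid: | ||
| | * | ||
| | *   void *ptrs[64]; | ||
| | *   size_t n = batch_alloc(ptrs, 64, 48, 0); | ||
| | *   for (size_t i = 0; i < n; i++) { | ||
| | *           sdallocx(ptrs[i], 48, 0); | ||
| | *   } | ||
| | */ | ||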
| 4323 | |||
| 4324 | /* | ||
| 4325 | * End non-standard functions. | ||
| 4326 | */ | ||
| 4327 | /******************************************************************************/ | ||
| 4328 | /* | ||
| 4329 | * The following functions are used by threading libraries for protection of | ||
| 4330 | * malloc during fork(). | ||
| 4331 | */ | ||
| 4332 | |||
| 4333 | /* | ||
| 4334 | * If an application creates a thread before doing any allocation in the main | ||
| 4335 | * thread, then calls fork(2) in the main thread followed by memory allocation | ||
| 4336 | * in the child process, a race can occur that results in deadlock within the | ||
| 4337 | * child: the main thread may have forked while the created thread had | ||
| 4338 | * partially initialized the allocator. Ordinarily jemalloc prevents | ||
| 4339 | * fork/malloc races via the following functions it registers during | ||
| 4340 | * initialization using pthread_atfork(), but of course that does no good if | ||
| 4341 | * the allocator isn't fully initialized at fork time. The following library | ||
| 4342 | * constructor is a partial solution to this problem. It may still be possible | ||
| 4343 | * to trigger the deadlock described above, but doing so would involve forking | ||
| 4344 | * via a library constructor that runs before jemalloc's runs. | ||
| 4345 | */ | ||
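| | |||
| | /* | ||
| | * For reference, the registration mentioned above is (simplified; the | ||
| | * real call happens during malloc initialization): | ||
| | * | ||
| | *   pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, | ||
| | *       jemalloc_postfork_child); | ||
| | */ | ||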
| 4346 | #ifndef JEMALLOC_JET | ||
| 4347 | JEMALLOC_ATTR(constructor) | ||
| 4348 | static void | ||
| 4349 | jemalloc_constructor(void) { | ||
| 4350 | malloc_init(); | ||
| 4351 | } | ||
| 4352 | #endif | ||
| 4353 | |||
| 4354 | #ifndef JEMALLOC_MUTEX_INIT_CB | ||
| 4355 | void | ||
| 4356 | jemalloc_prefork(void) | ||
| 4357 | #else | ||
| 4358 | JEMALLOC_EXPORT void | ||
| 4359 | _malloc_prefork(void) | ||
| 4360 | #endif | ||
| 4361 | { | ||
| 4362 | tsd_t *tsd; | ||
| 4363 | unsigned i, j, narenas; | ||
| 4364 | arena_t *arena; | ||
| 4365 | |||
| 4366 | #ifdef JEMALLOC_MUTEX_INIT_CB | ||
| 4367 | if (!malloc_initialized()) { | ||
| 4368 | return; | ||
| 4369 | } | ||
| 4370 | #endif | ||
| 4371 | assert(malloc_initialized()); | ||
| 4372 | |||
| 4373 | tsd = tsd_fetch(); | ||
| 4374 | |||
| 4375 | narenas = narenas_total_get(); | ||
| 4376 | |||
| 4377 | witness_prefork(tsd_witness_tsdp_get(tsd)); | ||
| 4378 | /* Acquire all mutexes in a safe order. */ | ||
| 4379 | ctl_prefork(tsd_tsdn(tsd)); | ||
| 4380 | tcache_prefork(tsd_tsdn(tsd)); | ||
| 4381 | malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); | ||
| 4382 | if (have_background_thread) { | ||
| 4383 | background_thread_prefork0(tsd_tsdn(tsd)); | ||
| 4384 | } | ||
| 4385 | prof_prefork0(tsd_tsdn(tsd)); | ||
| 4386 | if (have_background_thread) { | ||
| 4387 | background_thread_prefork1(tsd_tsdn(tsd)); | ||
| 4388 | } | ||
| 4389 | /* Break arena prefork into stages to preserve lock order. */ | ||
| 4390 | for (i = 0; i < 9; i++) { | ||
| 4391 | for (j = 0; j < narenas; j++) { | ||
| 4392 | if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != | ||
| 4393 | NULL) { | ||
| 4394 | switch (i) { | ||
| 4395 | case 0: | ||
| 4396 | arena_prefork0(tsd_tsdn(tsd), arena); | ||
| 4397 | break; | ||
| 4398 | case 1: | ||
| 4399 | arena_prefork1(tsd_tsdn(tsd), arena); | ||
| 4400 | break; | ||
| 4401 | case 2: | ||
| 4402 | arena_prefork2(tsd_tsdn(tsd), arena); | ||
| 4403 | break; | ||
| 4404 | case 3: | ||
| 4405 | arena_prefork3(tsd_tsdn(tsd), arena); | ||
| 4406 | break; | ||
| 4407 | case 4: | ||
| 4408 | arena_prefork4(tsd_tsdn(tsd), arena); | ||
| 4409 | break; | ||
| 4410 | case 5: | ||
| 4411 | arena_prefork5(tsd_tsdn(tsd), arena); | ||
| 4412 | break; | ||
| 4413 | case 6: | ||
| 4414 | arena_prefork6(tsd_tsdn(tsd), arena); | ||
| 4415 | break; | ||
| 4416 | case 7: | ||
| 4417 | arena_prefork7(tsd_tsdn(tsd), arena); | ||
| 4418 | break; | ||
| 4419 | case 8: | ||
| 4420 | arena_prefork8(tsd_tsdn(tsd), arena); | ||
| 4421 | break; | ||
| 4422 | default: not_reached(); | ||
| 4423 | } | ||
| 4424 | } | ||
| 4425 | } | ||
	}
| 4428 | prof_prefork1(tsd_tsdn(tsd)); | ||
| 4429 | stats_prefork(tsd_tsdn(tsd)); | ||
| 4430 | tsd_prefork(tsd); | ||
| 4431 | } | ||
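/*
 * The staged acquisition above exists so that every mutex is taken in one
 * global order before fork(). A single-mutex distillation of the per-mutex
 * protocol (illustrative names; jemalloc's malloc_mutex_* wrappers add
 * lock-order (witness) bookkeeping on top of this):
 */
#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void
demo_prefork(void) {
	pthread_mutex_lock(&demo_lock);	/* Quiesce before fork(). */
}

static void
demo_postfork_parent(void) {
	pthread_mutex_unlock(&demo_lock);
}

static void
demo_postfork_child(void) {
	/* The child starts with a single thread; lock state inherited from
	 * the parent's other threads is stale, so reinitialize rather than
	 * unlock. */
	pthread_mutex_init(&demo_lock, NULL);
}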
| 4432 | |||
| 4433 | #ifndef JEMALLOC_MUTEX_INIT_CB | ||
| 4434 | void | ||
| 4435 | jemalloc_postfork_parent(void) | ||
| 4436 | #else | ||
| 4437 | JEMALLOC_EXPORT void | ||
| 4438 | _malloc_postfork(void) | ||
| 4439 | #endif | ||
| 4440 | { | ||
| 4441 | tsd_t *tsd; | ||
| 4442 | unsigned i, narenas; | ||
| 4443 | |||
| 4444 | #ifdef JEMALLOC_MUTEX_INIT_CB | ||
| 4445 | if (!malloc_initialized()) { | ||
| 4446 | return; | ||
| 4447 | } | ||
| 4448 | #endif | ||
| 4449 | assert(malloc_initialized()); | ||
| 4450 | |||
| 4451 | tsd = tsd_fetch(); | ||
| 4452 | |||
| 4453 | tsd_postfork_parent(tsd); | ||
| 4454 | |||
| 4455 | witness_postfork_parent(tsd_witness_tsdp_get(tsd)); | ||
| 4456 | /* Release all mutexes, now that fork() has completed. */ | ||
| 4457 | stats_postfork_parent(tsd_tsdn(tsd)); | ||
| 4458 | for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { | ||
| 4459 | arena_t *arena; | ||
| 4460 | |||
| 4461 | if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { | ||
| 4462 | arena_postfork_parent(tsd_tsdn(tsd), arena); | ||
| 4463 | } | ||
| 4464 | } | ||
| 4465 | prof_postfork_parent(tsd_tsdn(tsd)); | ||
| 4466 | if (have_background_thread) { | ||
| 4467 | background_thread_postfork_parent(tsd_tsdn(tsd)); | ||
| 4468 | } | ||
| 4469 | malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); | ||
| 4470 | tcache_postfork_parent(tsd_tsdn(tsd)); | ||
| 4471 | ctl_postfork_parent(tsd_tsdn(tsd)); | ||
| 4472 | } | ||
| 4473 | |||
| 4474 | void | ||
| 4475 | jemalloc_postfork_child(void) { | ||
| 4476 | tsd_t *tsd; | ||
| 4477 | unsigned i, narenas; | ||
| 4478 | |||
| 4479 | assert(malloc_initialized()); | ||
| 4480 | |||
| 4481 | tsd = tsd_fetch(); | ||
| 4482 | |||
| 4483 | tsd_postfork_child(tsd); | ||
| 4484 | |||
| 4485 | witness_postfork_child(tsd_witness_tsdp_get(tsd)); | ||
| 4486 | /* Release all mutexes, now that fork() has completed. */ | ||
| 4487 | stats_postfork_child(tsd_tsdn(tsd)); | ||
| 4488 | for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { | ||
| 4489 | arena_t *arena; | ||
| 4490 | |||
| 4491 | if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { | ||
| 4492 | arena_postfork_child(tsd_tsdn(tsd), arena); | ||
| 4493 | } | ||
| 4494 | } | ||
| 4495 | prof_postfork_child(tsd_tsdn(tsd)); | ||
| 4496 | if (have_background_thread) { | ||
| 4497 | background_thread_postfork_child(tsd_tsdn(tsd)); | ||
| 4498 | } | ||
| 4499 | malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); | ||
| 4500 | tcache_postfork_child(tsd_tsdn(tsd)); | ||
| 4501 | ctl_postfork_child(tsd_tsdn(tsd)); | ||
| 4502 | } | ||
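/*
 * On platforms without JEMALLOC_MUTEX_INIT_CB, these three handlers are
 * registered during initialization (in the malloc_init path earlier in this
 * file) roughly as sketched below; register_fork_handlers() is an
 * illustrative name, not jemalloc's.
 */
#include <pthread.h>

void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);

static int
register_fork_handlers(void) {
	return pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child);
}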
| 4503 | |||
| 4504 | /******************************************************************************/ | ||
| 4505 | |||
/* Helps the application decide if a pointer is worth re-allocating in order to
 * reduce fragmentation.  Returns 1 if the allocation should be moved, and 0 if
 * the allocation should be kept.  If the application decides to re-allocate,
 * it should use MALLOCX_TCACHE_NONE when doing so. */
| 4509 | JEMALLOC_EXPORT int JEMALLOC_NOTHROW | ||
| 4510 | get_defrag_hint(void* ptr) { | ||
| 4511 | assert(ptr != NULL); | ||
| 4512 | return iget_defrag_hint(TSDN_NULL, ptr); | ||
| 4513 | } | ||
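/*
 * A sketch of the intended caller-side pattern; maybe_defrag() is
 * hypothetical (Redis's real logic lives in its active-defrag code), and the
 * header include assumes the je_-prefixed public API of this build.
 */
#include <string.h>
#include <jemalloc/jemalloc.h>	/* je_mallocx, je_sallocx, je_free */

int get_defrag_hint(void *ptr);

static void *
maybe_defrag(void *ptr) {
	if (!get_defrag_hint(ptr)) {
		return ptr;	/* Allocation is worth keeping in place. */
	}
	size_t size = je_sallocx(ptr, 0);
	/* Bypass the tcache so the replacement is carved from preferred
	 * (fuller) slabs rather than recycling recently freed memory. */
	void *copy = je_mallocx(size, MALLOCX_TCACHE_NONE);
	if (copy == NULL) {
		return ptr;	/* OOM: keep the original. */
	}
	memcpy(copy, ptr, size);
	je_free(ptr);
	return copy;
}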
| 4514 | |||
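/*
 * The *_with_usize wrappers below mirror the standard entry points while also
 * reporting, through their usize out-parameters, the usable size of the
 * allocation(s) involved, so callers can account for memory exactly without a
 * separate usable-size lookup per call.
 */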
| 4515 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 4516 | void JEMALLOC_NOTHROW * | ||
| 4517 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) | ||
| 4518 | malloc_with_usize(size_t size, size_t *usize) { | ||
| 4519 | return je_malloc_internal(size, usize); | ||
| 4520 | } | ||
| 4521 | |||
| 4522 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 4523 | void JEMALLOC_NOTHROW * | ||
| 4524 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) | ||
| 4525 | calloc_with_usize(size_t num, size_t size, size_t *usize) { | ||
| 4526 | return je_calloc_internal(num, size, usize); | ||
| 4527 | } | ||
| 4528 | |||
| 4529 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||
| 4530 | void JEMALLOC_NOTHROW * | ||
| 4531 | JEMALLOC_ALLOC_SIZE(2) | ||
| 4532 | realloc_with_usize(void *ptr, size_t size, size_t *old_usize, size_t *new_usize) { | ||
| 4533 | return je_realloc_internal(ptr, size, old_usize, new_usize); | ||
| 4534 | } | ||
| 4535 | |||
| 4536 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW | ||
| 4537 | free_with_usize(void *ptr, size_t *usize) { | ||
| 4538 | je_free_internal(ptr, usize); | ||
| 4539 | } | ||
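/*
 * A sketch of exact heap accounting built on the wrappers above; the
 * tracked_* helpers and the used_memory counter are hypothetical caller-side
 * code, not part of this file.
 */
#include <stdatomic.h>
#include <stddef.h>

void *malloc_with_usize(size_t size, size_t *usize);
void free_with_usize(void *ptr, size_t *usize);

static _Atomic size_t used_memory;

static void *
tracked_malloc(size_t size) {
	size_t usize = 0;
	void *p = malloc_with_usize(size, &usize);
	if (p != NULL) {
		atomic_fetch_add(&used_memory, usize);
	}
	return p;
}

static void
tracked_free(void *p) {
	size_t usize = 0;
	if (p == NULL) {
		return;
	}
	free_with_usize(p, &usize);
	atomic_fetch_sub(&used_memory, usize);
}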
