summaryrefslogtreecommitdiff
path: root/examples/redis-unstable/deps/jemalloc/test/unit
diff options
context:
space:
mode:
authorMitja Felicijan <mitja.felicijan@gmail.com>2026-01-21 22:52:54 +0100
committerMitja Felicijan <mitja.felicijan@gmail.com>2026-01-21 22:52:54 +0100
commitdcacc00e3750300617ba6e16eb346713f91a783a (patch)
tree38e2d4fb5ed9d119711d4295c6eda4b014af73fd /examples/redis-unstable/deps/jemalloc/test/unit
parent58dac10aeb8f5a041c46bddbeaf4c7966a99b998 (diff)
downloadcrep-dcacc00e3750300617ba6e16eb346713f91a783a.tar.gz
Remove testing data
Diffstat (limited to 'examples/redis-unstable/deps/jemalloc/test/unit')
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/SFMT.c1599
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/a0.c16
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/arena_decay.c436
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/arena_decay.sh3
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/arena_reset.c361
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/arena_reset_prof.c4
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/arena_reset_prof.sh3
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/atomic.c229
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/background_thread.c118
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/background_thread_enable.c96
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/base.c265
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc.c189
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc.sh3
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc_prof.c1
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc_prof.sh3
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/binshard.c154
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/binshard.sh3
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/bit_util.c307
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/bitmap.c343
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/buf_writer.c196
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/cache_bin.c384
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/ckh.c211
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/counter.c80
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/decay.c283
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/div.c29
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/double_free.c77
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/double_free.h1
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/edata_cache.c226
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/emitter.c533
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/extent_quantize.c141
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/fb.c954
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/fork.c141
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/fxp.c394
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/hash.c173
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/hook.c586
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/hpa.c459
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/hpa_background_thread.c188
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/hpa_background_thread.sh4
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/hpdata.c244
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/huge.c108
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/inspect.c278
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/inspect.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/junk.c195
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/junk.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/junk_alloc.c1
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/junk_alloc.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/junk_free.c1
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/junk_free.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/log.c198
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/mallctl.c1274
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/malloc_conf_2.c29
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/malloc_conf_2.sh1
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/malloc_io.c268
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/math.c390
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/mpsc_queue.c304
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/mq.c89
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/mtx.c57
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/nstime.c252
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/oversize_threshold.c133
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/pa.c126
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/pack.c166
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/pack.sh4
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/pages.c29
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/peak.c47
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/ph.c330
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prng.c189
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_accum.c84
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_accum.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_active.c119
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_active.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_gdump.c77
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_gdump.sh6
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_hook.c169
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_hook.sh6
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_idump.c57
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_idump.sh8
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_log.c151
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_log.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_mdump.c216
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_mdump.sh6
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_recent.c678
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_recent.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_reset.c266
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_reset.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_stats.c151
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_stats.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_sys_thread_name.c77
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_sys_thread_name.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_tctx.c48
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_tctx.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_thread_name.c122
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/prof_thread_name.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/psset.c748
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/ql.c317
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/qr.c243
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/rb.c1019
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/retained.c188
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/rtree.c289
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/safety_check.c163
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/safety_check.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/san.c207
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/san.sh3
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/san_bump.c111
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/sc.c33
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/sec.c634
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/seq.c95
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/size_check.c79
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/size_check.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/size_classes.c188
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/slab.c39
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/smoothstep.c102
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/spin.c18
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/stats.c431
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/stats_print.c999
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/sz.c66
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/tcache_max.c175
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/tcache_max.sh3
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/test_hooks.c38
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/thread_event.c34
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/thread_event.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/ticker.c100
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/tsd.c274
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/uaf.c262
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/witness.c280
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/zero.c59
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/zero.sh5
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_abort.c26
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_abort.sh3
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_alloc.c48
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_alloc.sh3
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_free.c33
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_free.sh3
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/zero_reallocs.c40
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/zero_reallocs.sh3
134 files changed, 0 insertions, 23588 deletions
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/SFMT.c b/examples/redis-unstable/deps/jemalloc/test/unit/SFMT.c
deleted file mode 100644
index b9f85dd..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/SFMT.c
+++ /dev/null
@@ -1,1599 +0,0 @@
1/*
2 * This file derives from SFMT 1.3.3
3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
4 * released under the terms of the following license:
5 *
6 * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
7 * University. All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions are
11 * met:
12 *
13 * * Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * * Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 * * Neither the name of the Hiroshima University nor the names of
20 * its contributors may be used to endorse or promote products
21 * derived from this software without specific prior written
22 * permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
27 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
28 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
29 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
30 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
31 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
32 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
33 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
34 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 */
36#include "test/jemalloc_test.h"
37
38#define BLOCK_SIZE 10000
39#define BLOCK_SIZE64 (BLOCK_SIZE / 2)
40#define COUNT_1 1000
41#define COUNT_2 700
42
43static const uint32_t init_gen_rand_32_expected[] = {
44 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U,
45 3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U,
46 2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U,
47 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U,
48 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U,
49 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U,
50 3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U,
51 1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U,
52 2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U,
53 463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U,
54 3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U,
55 1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U,
56 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U,
57 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U,
58 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U,
59 2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U,
60 1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U,
61 2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U,
62 3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U,
63 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U,
64 3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U,
65 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U,
66 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U,
67 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U,
68 1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U,
69 1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U,
70 2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U,
71 3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U,
72 3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U,
73 3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U,
74 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U,
75 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U,
76 1104864179U, 342430307U, 1350510923U, 3024656237U, 1028417492U,
77 2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U,
78 2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U,
79 3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U,
80 2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U,
81 2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U,
82 2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U,
83 4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U,
84 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U,
85 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U,
86 3083335297U, 26885281U, 3932155283U, 1531751116U, 1425227133U,
87 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U,
88 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U,
89 1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U,
90 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U,
91 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U,
92 3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U,
93 4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U,
94 3141868989U, 3293216228U, 1194331837U, 1254570642U, 3049934521U,
95 2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U,
96 1104807822U, 3186961098U, 315764646U, 2163680838U, 3574508994U,
97 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U,
98 444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U,
99 3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U,
100 928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U,
101 2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U,
102 3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U,
103 2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U,
104 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U,
105 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U,
106 1080720688U, 3938032556U, 387896427U, 2650839632U, 99042991U,
107 1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U,
108 4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U,
109 1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U,
110 3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U,
111 2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U,
112 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U,
113 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U,
114 841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U,
115 2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U,
116 1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U,
117 2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U,
118 2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U,
119 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U,
120 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U,
121 1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U,
122 2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U,
123 1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U,
124 1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U,
125 3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U,
126 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U,
127 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U,
128 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U,
129 1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U,
130 2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U,
131 1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U,
132 785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U,
133 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U,
134 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U,
135 2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U,
136 3034720222U, 1090798544U, 2942170004U, 4036187520U, 686972531U,
137 2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U,
138 2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U,
139 814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U,
140 2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U,
141 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U,
142 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U,
143 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U,
144 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U,
145 3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U,
146 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U,
147 4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U,
148 3994702151U, 22493119U, 3584400918U, 3446253670U, 4254789085U,
149 1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U,
150 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U,
151 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U,
152 1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U,
153 154025329U, 121678860U, 1164915738U, 973873761U, 269116100U,
154 52087970U, 744015362U, 498556057U, 94298882U, 1563271621U,
155 2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U,
156 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U,
157 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U,
158 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U,
159 226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U,
160 1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U,
161 1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U,
162 2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U,
163 3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U,
164 2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U,
165 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U,
166 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U,
167 1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U,
168 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U,
169 1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U,
170 2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U,
171 1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U,
172 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U,
173 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U,
174 2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U,
175 1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U,
176 14130042U, 192292500U, 947227076U, 90719497U, 3854230320U,
177 784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U,
178 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U,
179 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U,
180 1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U,
181 3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U,
182 3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U,
183 1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U,
184 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U,
185 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U,
186 1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U,
187 448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U,
188 2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U,
189 1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U,
190 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U,
191 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U,
192 349178792U, 226482567U, 3102426060U, 3575998268U, 2103001871U,
193 3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U,
194 3842802787U, 811735523U, 105712518U, 663434053U, 1855889273U,
195 2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U,
196 3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U,
197 353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U,
198 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U,
199 4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U,
200 2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U,
201 2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U,
202 2489884874U, 756853961U, 2298874501U, 4013448667U, 4143996022U,
203 2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U,
204 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U,
205 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U,
206 2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U,
207 2276901713U, 365637751U, 2260718426U, 1394775634U, 1791172338U,
208 2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U,
209 1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U,
210 3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U,
211 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U,
212 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U,
213 1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U,
214 2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U,
215 1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U,
216 2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U,
217 4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U,
218 2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U,
219 4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U,
220 1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U,
221 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U,
222 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U,
223 3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U,
224 1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U,
225 1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U,
226 1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U,
227 1244491489U, 3533770203U, 567496053U, 2757924305U, 2781639343U,
228 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U,
229 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U,
230 330725126U, 367400677U, 888239854U, 545570454U, 4259590525U,
231 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U,
232 3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U,
233 3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U,
234 4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U,
235 3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U,
236 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U,
237 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U,
238 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U,
239 2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U,
240 1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U,
241 2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U,
242 4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U,
243 2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U
244};
245static const uint32_t init_by_array_32_expected[] = {
246 2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U,
247 277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U,
248 1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U,
249 2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U,
250 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U,
251 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U,
252 121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U,
253 3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U,
254 4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U,
255 3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U,
256 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U,
257 3438299U, 2150366101U, 1577256676U, 3802546413U, 1787774626U,
258 4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U,
259 1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U,
260 3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U,
261 3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U,
262 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U,
263 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U,
264 2315569495U, 2729518615U, 564745877U, 1263517638U, 3157185798U,
265 1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U,
266 1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U,
267 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U,
268 3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U,
269 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U,
270 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U,
271 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U,
272 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U,
273 2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U,
274 3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U,
275 2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U,
276 3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U,
277 3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U,
278 3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U,
279 3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U,
280 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U,
281 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U,
282 1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U,
283 1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U,
284 776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U,
285 1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U,
286 1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U,
287 2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U,
288 20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U,
289 3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U,
290 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U,
291 1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U,
292 4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U,
293 2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U,
294 1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U,
295 2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U,
296 1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U,
297 1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U,
298 3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U,
299 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U,
300 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U,
301 3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U,
302 1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U,
303 4022541136U, 3043635877U, 492509624U, 3263466772U, 1509148086U,
304 921459029U, 3198857146U, 705479721U, 3835966910U, 3603356465U,
305 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U,
306 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U,
307 2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U,
308 644609697U, 424968996U, 919890700U, 2986824110U, 816423214U,
309 4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U,
310 3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U,
311 2770783427U, 2775406005U, 2777781742U, 1931292655U, 472147933U,
312 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U,
313 3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U,
314 389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U,
315 3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U,
316 3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U,
317 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U,
318 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U,
319 4288137521U, 819087232U, 596301494U, 872823172U, 1526888217U,
320 805161465U, 1116186205U, 2829002754U, 2352620120U, 620121516U,
321 354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U,
322 4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U,
323 3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U,
324 1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U,
325 3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U,
326 3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U,
327 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U,
328 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U,
329 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U,
330 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U,
331 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U,
332 81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U,
333 537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U,
334 2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U,
335 3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U,
336 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U,
337 2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U,
338 1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U,
339 2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U,
340 1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U,
341 1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U,
342 4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U,
343 3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U,
344 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U,
345 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U,
346 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U,
347 3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U,
348 3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U,
349 1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U,
350 3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U,
351 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U,
352 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U,
353 1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U,
354 3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U,
355 460833524U, 1091049576U, 85522880U, 2553251U, 132102809U,
356 2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U,
357 1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U,
358 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U,
359 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U,
360 1596096923U, 610150600U, 431464457U, 2541325046U, 486478003U,
361 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U,
362 1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U,
363 4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U,
364 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U,
365 3512343882U, 1552467474U, 3316162670U, 1275841024U, 4142173454U,
366 565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U,
367 203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U,
368 3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U,
369 3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U,
370 3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U,
371 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U,
372 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U,
373 3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U,
374 1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U,
375 1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U,
376 681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U,
377 2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U,
378 2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U,
379 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U,
380 1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U,
381 3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U,
382 2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U,
383 4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U,
384 2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U,
385 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U,
386 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U,
387 3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U,
388 4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U,
389 1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U,
390 3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U,
391 967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U,
392 2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U,
393 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U,
394 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U,
395 226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U,
396 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U,
397 3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U,
398 1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U,
399 2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U,
400 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U,
401 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U,
402 674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U,
403 3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U,
404 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U,
405 2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U,
406 1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U,
407 3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U,
408 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U,
409 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U,
410 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U,
411 2477423819U, 3932081579U, 900879979U, 2145588390U, 2670007504U,
412 580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U,
413 2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U,
414 1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U,
415 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U,
416 1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U,
417 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U,
418 1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U,
419 2423096824U, 21831741U, 380011703U, 2498168716U, 861806087U,
420 1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U,
421 4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U,
422 2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U,
423 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U,
424 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U,
425 2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U,
426 3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U,
427 136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U,
428 2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U,
429 1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U,
430 1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U,
431 2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U,
432 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U,
433 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U,
434 2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U,
435 838356250U, 1416268324U, 674476934U, 90795364U, 141672229U,
436 3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U,
437 3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U,
438 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U,
439 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U,
440 2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U,
441 2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U,
442 3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U,
443 584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U,
444 3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U,
445 2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U
446};
447static const uint64_t init_gen_rand_64_expected[] = {
448 KQU(16924766246869039260), KQU( 8201438687333352714),
449 KQU( 2265290287015001750), KQU(18397264611805473832),
450 KQU( 3375255223302384358), KQU( 6345559975416828796),
451 KQU(18229739242790328073), KQU( 7596792742098800905),
452 KQU( 255338647169685981), KQU( 2052747240048610300),
453 KQU(18328151576097299343), KQU(12472905421133796567),
454 KQU(11315245349717600863), KQU(16594110197775871209),
455 KQU(15708751964632456450), KQU(10452031272054632535),
456 KQU(11097646720811454386), KQU( 4556090668445745441),
457 KQU(17116187693090663106), KQU(14931526836144510645),
458 KQU( 9190752218020552591), KQU( 9625800285771901401),
459 KQU(13995141077659972832), KQU( 5194209094927829625),
460 KQU( 4156788379151063303), KQU( 8523452593770139494),
461 KQU(14082382103049296727), KQU( 2462601863986088483),
462 KQU( 3030583461592840678), KQU( 5221622077872827681),
463 KQU( 3084210671228981236), KQU(13956758381389953823),
464 KQU(13503889856213423831), KQU(15696904024189836170),
465 KQU( 4612584152877036206), KQU( 6231135538447867881),
466 KQU(10172457294158869468), KQU( 6452258628466708150),
467 KQU(14044432824917330221), KQU( 370168364480044279),
468 KQU(10102144686427193359), KQU( 667870489994776076),
469 KQU( 2732271956925885858), KQU(18027788905977284151),
470 KQU(15009842788582923859), KQU( 7136357960180199542),
471 KQU(15901736243475578127), KQU(16951293785352615701),
472 KQU(10551492125243691632), KQU(17668869969146434804),
473 KQU(13646002971174390445), KQU( 9804471050759613248),
474 KQU( 5511670439655935493), KQU(18103342091070400926),
475 KQU(17224512747665137533), KQU(15534627482992618168),
476 KQU( 1423813266186582647), KQU(15821176807932930024),
477 KQU( 30323369733607156), KQU(11599382494723479403),
478 KQU( 653856076586810062), KQU( 3176437395144899659),
479 KQU(14028076268147963917), KQU(16156398271809666195),
480 KQU( 3166955484848201676), KQU( 5746805620136919390),
481 KQU(17297845208891256593), KQU(11691653183226428483),
482 KQU(17900026146506981577), KQU(15387382115755971042),
483 KQU(16923567681040845943), KQU( 8039057517199388606),
484 KQU(11748409241468629263), KQU( 794358245539076095),
485 KQU(13438501964693401242), KQU(14036803236515618962),
486 KQU( 5252311215205424721), KQU(17806589612915509081),
487 KQU( 6802767092397596006), KQU(14212120431184557140),
488 KQU( 1072951366761385712), KQU(13098491780722836296),
489 KQU( 9466676828710797353), KQU(12673056849042830081),
490 KQU(12763726623645357580), KQU(16468961652999309493),
491 KQU(15305979875636438926), KQU(17444713151223449734),
492 KQU( 5692214267627883674), KQU(13049589139196151505),
493 KQU( 880115207831670745), KQU( 1776529075789695498),
494 KQU(16695225897801466485), KQU(10666901778795346845),
495 KQU( 6164389346722833869), KQU( 2863817793264300475),
496 KQU( 9464049921886304754), KQU( 3993566636740015468),
497 KQU( 9983749692528514136), KQU(16375286075057755211),
498 KQU(16042643417005440820), KQU(11445419662923489877),
499 KQU( 7999038846885158836), KQU( 6721913661721511535),
500 KQU( 5363052654139357320), KQU( 1817788761173584205),
501 KQU(13290974386445856444), KQU( 4650350818937984680),
502 KQU( 8219183528102484836), KQU( 1569862923500819899),
503 KQU( 4189359732136641860), KQU(14202822961683148583),
504 KQU( 4457498315309429058), KQU(13089067387019074834),
505 KQU(11075517153328927293), KQU(10277016248336668389),
506 KQU( 7070509725324401122), KQU(17808892017780289380),
507 KQU(13143367339909287349), KQU( 1377743745360085151),
508 KQU( 5749341807421286485), KQU(14832814616770931325),
509 KQU( 7688820635324359492), KQU(10960474011539770045),
510 KQU( 81970066653179790), KQU(12619476072607878022),
511 KQU( 4419566616271201744), KQU(15147917311750568503),
512 KQU( 5549739182852706345), KQU( 7308198397975204770),
513 KQU(13580425496671289278), KQU(17070764785210130301),
514 KQU( 8202832846285604405), KQU( 6873046287640887249),
515 KQU( 6927424434308206114), KQU( 6139014645937224874),
516 KQU(10290373645978487639), KQU(15904261291701523804),
517 KQU( 9628743442057826883), KQU(18383429096255546714),
518 KQU( 4977413265753686967), KQU( 7714317492425012869),
519 KQU( 9025232586309926193), KQU(14627338359776709107),
520 KQU(14759849896467790763), KQU(10931129435864423252),
521 KQU( 4588456988775014359), KQU(10699388531797056724),
522 KQU( 468652268869238792), KQU( 5755943035328078086),
523 KQU( 2102437379988580216), KQU( 9986312786506674028),
524 KQU( 2654207180040945604), KQU( 8726634790559960062),
525 KQU( 100497234871808137), KQU( 2800137176951425819),
526 KQU( 6076627612918553487), KQU( 5780186919186152796),
527 KQU( 8179183595769929098), KQU( 6009426283716221169),
528 KQU( 2796662551397449358), KQU( 1756961367041986764),
529 KQU( 6972897917355606205), KQU(14524774345368968243),
530 KQU( 2773529684745706940), KQU( 4853632376213075959),
531 KQU( 4198177923731358102), KQU( 8271224913084139776),
532 KQU( 2741753121611092226), KQU(16782366145996731181),
533 KQU(15426125238972640790), KQU(13595497100671260342),
534 KQU( 3173531022836259898), KQU( 6573264560319511662),
535 KQU(18041111951511157441), KQU( 2351433581833135952),
536 KQU( 3113255578908173487), KQU( 1739371330877858784),
537 KQU(16046126562789165480), KQU( 8072101652214192925),
538 KQU(15267091584090664910), KQU( 9309579200403648940),
539 KQU( 5218892439752408722), KQU(14492477246004337115),
540 KQU(17431037586679770619), KQU( 7385248135963250480),
541 KQU( 9580144956565560660), KQU( 4919546228040008720),
542 KQU(15261542469145035584), KQU(18233297270822253102),
543 KQU( 5453248417992302857), KQU( 9309519155931460285),
544 KQU(10342813012345291756), KQU(15676085186784762381),
545 KQU(15912092950691300645), KQU( 9371053121499003195),
546 KQU( 9897186478226866746), KQU(14061858287188196327),
547 KQU( 122575971620788119), KQU(12146750969116317754),
548 KQU( 4438317272813245201), KQU( 8332576791009527119),
549 KQU(13907785691786542057), KQU(10374194887283287467),
550 KQU( 2098798755649059566), KQU( 3416235197748288894),
551 KQU( 8688269957320773484), KQU( 7503964602397371571),
552 KQU(16724977015147478236), KQU( 9461512855439858184),
553 KQU(13259049744534534727), KQU( 3583094952542899294),
554 KQU( 8764245731305528292), KQU(13240823595462088985),
555 KQU(13716141617617910448), KQU(18114969519935960955),
556 KQU( 2297553615798302206), KQU( 4585521442944663362),
557 KQU(17776858680630198686), KQU( 4685873229192163363),
558 KQU( 152558080671135627), KQU(15424900540842670088),
559 KQU(13229630297130024108), KQU(17530268788245718717),
560 KQU(16675633913065714144), KQU( 3158912717897568068),
561 KQU(15399132185380087288), KQU( 7401418744515677872),
562 KQU(13135412922344398535), KQU( 6385314346100509511),
563 KQU(13962867001134161139), KQU(10272780155442671999),
564 KQU(12894856086597769142), KQU(13340877795287554994),
565 KQU(12913630602094607396), KQU(12543167911119793857),
566 KQU(17343570372251873096), KQU(10959487764494150545),
567 KQU( 6966737953093821128), KQU(13780699135496988601),
568 KQU( 4405070719380142046), KQU(14923788365607284982),
569 KQU( 2869487678905148380), KQU( 6416272754197188403),
570 KQU(15017380475943612591), KQU( 1995636220918429487),
571 KQU( 3402016804620122716), KQU(15800188663407057080),
572 KQU(11362369990390932882), KQU(15262183501637986147),
573 KQU(10239175385387371494), KQU( 9352042420365748334),
574 KQU( 1682457034285119875), KQU( 1724710651376289644),
575 KQU( 2038157098893817966), KQU( 9897825558324608773),
576 KQU( 1477666236519164736), KQU(16835397314511233640),
577 KQU(10370866327005346508), KQU(10157504370660621982),
578 KQU(12113904045335882069), KQU(13326444439742783008),
579 KQU(11302769043000765804), KQU(13594979923955228484),
580 KQU(11779351762613475968), KQU( 3786101619539298383),
581 KQU( 8021122969180846063), KQU(15745904401162500495),
582 KQU(10762168465993897267), KQU(13552058957896319026),
583 KQU(11200228655252462013), KQU( 5035370357337441226),
584 KQU( 7593918984545500013), KQU( 5418554918361528700),
585 KQU( 4858270799405446371), KQU( 9974659566876282544),
586 KQU(18227595922273957859), KQU( 2772778443635656220),
587 KQU(14285143053182085385), KQU( 9939700992429600469),
588 KQU(12756185904545598068), KQU( 2020783375367345262),
589 KQU( 57026775058331227), KQU( 950827867930065454),
590 KQU( 6602279670145371217), KQU( 2291171535443566929),
591 KQU( 5832380724425010313), KQU( 1220343904715982285),
592 KQU(17045542598598037633), KQU(15460481779702820971),
593 KQU(13948388779949365130), KQU(13975040175430829518),
594 KQU(17477538238425541763), KQU(11104663041851745725),
595 KQU(15860992957141157587), KQU(14529434633012950138),
596 KQU( 2504838019075394203), KQU( 7512113882611121886),
597 KQU( 4859973559980886617), KQU( 1258601555703250219),
598 KQU(15594548157514316394), KQU( 4516730171963773048),
599 KQU(11380103193905031983), KQU( 6809282239982353344),
600 KQU(18045256930420065002), KQU( 2453702683108791859),
601 KQU( 977214582986981460), KQU( 2006410402232713466),
602 KQU( 6192236267216378358), KQU( 3429468402195675253),
603 KQU(18146933153017348921), KQU(17369978576367231139),
604 KQU( 1246940717230386603), KQU(11335758870083327110),
605 KQU(14166488801730353682), KQU( 9008573127269635732),
606 KQU(10776025389820643815), KQU(15087605441903942962),
607 KQU( 1359542462712147922), KQU(13898874411226454206),
608 KQU(17911176066536804411), KQU( 9435590428600085274),
609 KQU( 294488509967864007), KQU( 8890111397567922046),
610 KQU( 7987823476034328778), KQU(13263827582440967651),
611 KQU( 7503774813106751573), KQU(14974747296185646837),
612 KQU( 8504765037032103375), KQU(17340303357444536213),
613 KQU( 7704610912964485743), KQU( 8107533670327205061),
614 KQU( 9062969835083315985), KQU(16968963142126734184),
615 KQU(12958041214190810180), KQU( 2720170147759570200),
616 KQU( 2986358963942189566), KQU(14884226322219356580),
617 KQU( 286224325144368520), KQU(11313800433154279797),
618 KQU(18366849528439673248), KQU(17899725929482368789),
619 KQU( 3730004284609106799), KQU( 1654474302052767205),
620 KQU( 5006698007047077032), KQU( 8196893913601182838),
621 KQU(15214541774425211640), KQU(17391346045606626073),
622 KQU( 8369003584076969089), KQU( 3939046733368550293),
623 KQU(10178639720308707785), KQU( 2180248669304388697),
624 KQU( 62894391300126322), KQU( 9205708961736223191),
625 KQU( 6837431058165360438), KQU( 3150743890848308214),
626 KQU(17849330658111464583), KQU(12214815643135450865),
627 KQU(13410713840519603402), KQU( 3200778126692046802),
628 KQU(13354780043041779313), KQU( 800850022756886036),
629 KQU(15660052933953067433), KQU( 6572823544154375676),
630 KQU(11030281857015819266), KQU(12682241941471433835),
631 KQU(11654136407300274693), KQU( 4517795492388641109),
632 KQU( 9757017371504524244), KQU(17833043400781889277),
633 KQU(12685085201747792227), KQU(10408057728835019573),
634 KQU( 98370418513455221), KQU( 6732663555696848598),
635 KQU(13248530959948529780), KQU( 3530441401230622826),
636 KQU(18188251992895660615), KQU( 1847918354186383756),
637 KQU( 1127392190402660921), KQU(11293734643143819463),
638 KQU( 3015506344578682982), KQU(13852645444071153329),
639 KQU( 2121359659091349142), KQU( 1294604376116677694),
640 KQU( 5616576231286352318), KQU( 7112502442954235625),
641 KQU(11676228199551561689), KQU(12925182803007305359),
642 KQU( 7852375518160493082), KQU( 1136513130539296154),
643 KQU( 5636923900916593195), KQU( 3221077517612607747),
644 KQU(17784790465798152513), KQU( 3554210049056995938),
645 KQU(17476839685878225874), KQU( 3206836372585575732),
646 KQU( 2765333945644823430), KQU(10080070903718799528),
647 KQU( 5412370818878286353), KQU( 9689685887726257728),
648 KQU( 8236117509123533998), KQU( 1951139137165040214),
649 KQU( 4492205209227980349), KQU(16541291230861602967),
650 KQU( 1424371548301437940), KQU( 9117562079669206794),
651 KQU(14374681563251691625), KQU(13873164030199921303),
652 KQU( 6680317946770936731), KQU(15586334026918276214),
653 KQU(10896213950976109802), KQU( 9506261949596413689),
654 KQU( 9903949574308040616), KQU( 6038397344557204470),
655 KQU( 174601465422373648), KQU(15946141191338238030),
656 KQU(17142225620992044937), KQU( 7552030283784477064),
657 KQU( 2947372384532947997), KQU( 510797021688197711),
658 KQU( 4962499439249363461), KQU( 23770320158385357),
659 KQU( 959774499105138124), KQU( 1468396011518788276),
660 KQU( 2015698006852312308), KQU( 4149400718489980136),
661 KQU( 5992916099522371188), KQU(10819182935265531076),
662 KQU(16189787999192351131), KQU( 342833961790261950),
663 KQU(12470830319550495336), KQU(18128495041912812501),
664 KQU( 1193600899723524337), KQU( 9056793666590079770),
665 KQU( 2154021227041669041), KQU( 4963570213951235735),
666 KQU( 4865075960209211409), KQU( 2097724599039942963),
667 KQU( 2024080278583179845), KQU(11527054549196576736),
668 KQU(10650256084182390252), KQU( 4808408648695766755),
669 KQU( 1642839215013788844), KQU(10607187948250398390),
670 KQU( 7076868166085913508), KQU( 730522571106887032),
671 KQU(12500579240208524895), KQU( 4484390097311355324),
672 KQU(15145801330700623870), KQU( 8055827661392944028),
673 KQU( 5865092976832712268), KQU(15159212508053625143),
674 KQU( 3560964582876483341), KQU( 4070052741344438280),
675 KQU( 6032585709886855634), KQU(15643262320904604873),
676 KQU( 2565119772293371111), KQU( 318314293065348260),
677 KQU(15047458749141511872), KQU( 7772788389811528730),
678 KQU( 7081187494343801976), KQU( 6465136009467253947),
679 KQU(10425940692543362069), KQU( 554608190318339115),
680 KQU(14796699860302125214), KQU( 1638153134431111443),
681 KQU(10336967447052276248), KQU( 8412308070396592958),
682 KQU( 4004557277152051226), KQU( 8143598997278774834),
683 KQU(16413323996508783221), KQU(13139418758033994949),
684 KQU( 9772709138335006667), KQU( 2818167159287157659),
685 KQU(17091740573832523669), KQU(14629199013130751608),
686 KQU(18268322711500338185), KQU( 8290963415675493063),
687 KQU( 8830864907452542588), KQU( 1614839084637494849),
688 KQU(14855358500870422231), KQU( 3472996748392519937),
689 KQU(15317151166268877716), KQU( 5825895018698400362),
690 KQU(16730208429367544129), KQU(10481156578141202800),
691 KQU( 4746166512382823750), KQU(12720876014472464998),
692 KQU( 8825177124486735972), KQU(13733447296837467838),
693 KQU( 6412293741681359625), KQU( 8313213138756135033),
694 KQU(11421481194803712517), KQU( 7997007691544174032),
695 KQU( 6812963847917605930), KQU( 9683091901227558641),
696 KQU(14703594165860324713), KQU( 1775476144519618309),
697 KQU( 2724283288516469519), KQU( 717642555185856868),
698 KQU( 8736402192215092346), KQU(11878800336431381021),
699 KQU( 4348816066017061293), KQU( 6115112756583631307),
700 KQU( 9176597239667142976), KQU(12615622714894259204),
701 KQU(10283406711301385987), KQU( 5111762509485379420),
702 KQU( 3118290051198688449), KQU( 7345123071632232145),
703 KQU( 9176423451688682359), KQU( 4843865456157868971),
704 KQU(12008036363752566088), KQU(12058837181919397720),
705 KQU( 2145073958457347366), KQU( 1526504881672818067),
706 KQU( 3488830105567134848), KQU(13208362960674805143),
707 KQU( 4077549672899572192), KQU( 7770995684693818365),
708 KQU( 1398532341546313593), KQU(12711859908703927840),
709 KQU( 1417561172594446813), KQU(17045191024194170604),
710 KQU( 4101933177604931713), KQU(14708428834203480320),
711 KQU(17447509264469407724), KQU(14314821973983434255),
712 KQU(17990472271061617265), KQU( 5087756685841673942),
713 KQU(12797820586893859939), KQU( 1778128952671092879),
714 KQU( 3535918530508665898), KQU( 9035729701042481301),
715 KQU(14808661568277079962), KQU(14587345077537747914),
716 KQU(11920080002323122708), KQU( 6426515805197278753),
717 KQU( 3295612216725984831), KQU(11040722532100876120),
718 KQU(12305952936387598754), KQU(16097391899742004253),
719 KQU( 4908537335606182208), KQU(12446674552196795504),
720 KQU(16010497855816895177), KQU( 9194378874788615551),
721 KQU( 3382957529567613384), KQU( 5154647600754974077),
722 KQU( 9801822865328396141), KQU( 9023662173919288143),
723 KQU(17623115353825147868), KQU( 8238115767443015816),
724 KQU(15811444159859002560), KQU( 9085612528904059661),
725 KQU( 6888601089398614254), KQU( 258252992894160189),
726 KQU( 6704363880792428622), KQU( 6114966032147235763),
727 KQU(11075393882690261875), KQU( 8797664238933620407),
728 KQU( 5901892006476726920), KQU( 5309780159285518958),
729 KQU(14940808387240817367), KQU(14642032021449656698),
730 KQU( 9808256672068504139), KQU( 3670135111380607658),
731 KQU(11211211097845960152), KQU( 1474304506716695808),
732 KQU(15843166204506876239), KQU( 7661051252471780561),
733 KQU(10170905502249418476), KQU( 7801416045582028589),
734 KQU( 2763981484737053050), KQU( 9491377905499253054),
735 KQU(16201395896336915095), KQU( 9256513756442782198),
736 KQU( 5411283157972456034), KQU( 5059433122288321676),
737 KQU( 4327408006721123357), KQU( 9278544078834433377),
738 KQU( 7601527110882281612), KQU(11848295896975505251),
739 KQU(12096998801094735560), KQU(14773480339823506413),
740 KQU(15586227433895802149), KQU(12786541257830242872),
741 KQU( 6904692985140503067), KQU( 5309011515263103959),
742 KQU(12105257191179371066), KQU(14654380212442225037),
743 KQU( 2556774974190695009), KQU( 4461297399927600261),
744 KQU(14888225660915118646), KQU(14915459341148291824),
745 KQU( 2738802166252327631), KQU( 6047155789239131512),
746 KQU(12920545353217010338), KQU(10697617257007840205),
747 KQU( 2751585253158203504), KQU(13252729159780047496),
748 KQU(14700326134672815469), KQU(14082527904374600529),
749 KQU(16852962273496542070), KQU(17446675504235853907),
750 KQU(15019600398527572311), KQU(12312781346344081551),
751 KQU(14524667935039810450), KQU( 5634005663377195738),
752 KQU(11375574739525000569), KQU( 2423665396433260040),
753 KQU( 5222836914796015410), KQU( 4397666386492647387),
754 KQU( 4619294441691707638), KQU( 665088602354770716),
755 KQU(13246495665281593610), KQU( 6564144270549729409),
756 KQU(10223216188145661688), KQU( 3961556907299230585),
757 KQU(11543262515492439914), KQU(16118031437285993790),
758 KQU( 7143417964520166465), KQU(13295053515909486772),
759 KQU( 40434666004899675), KQU(17127804194038347164),
760 KQU( 8599165966560586269), KQU( 8214016749011284903),
761 KQU(13725130352140465239), KQU( 5467254474431726291),
762 KQU( 7748584297438219877), KQU(16933551114829772472),
763 KQU( 2169618439506799400), KQU( 2169787627665113463),
764 KQU(17314493571267943764), KQU(18053575102911354912),
765 KQU(11928303275378476973), KQU(11593850925061715550),
766 KQU(17782269923473589362), KQU( 3280235307704747039),
767 KQU( 6145343578598685149), KQU(17080117031114086090),
768 KQU(18066839902983594755), KQU( 6517508430331020706),
769 KQU( 8092908893950411541), KQU(12558378233386153732),
770 KQU( 4476532167973132976), KQU(16081642430367025016),
771 KQU( 4233154094369139361), KQU( 8693630486693161027),
772 KQU(11244959343027742285), KQU(12273503967768513508),
773 KQU(14108978636385284876), KQU( 7242414665378826984),
774 KQU( 6561316938846562432), KQU( 8601038474994665795),
775 KQU(17532942353612365904), KQU(17940076637020912186),
776 KQU( 7340260368823171304), KQU( 7061807613916067905),
777 KQU(10561734935039519326), KQU(17990796503724650862),
778 KQU( 6208732943911827159), KQU( 359077562804090617),
779 KQU(14177751537784403113), KQU(10659599444915362902),
780 KQU(15081727220615085833), KQU(13417573895659757486),
781 KQU(15513842342017811524), KQU(11814141516204288231),
782 KQU( 1827312513875101814), KQU( 2804611699894603103),
783 KQU(17116500469975602763), KQU(12270191815211952087),
784 KQU(12256358467786024988), KQU(18435021722453971267),
785 KQU( 671330264390865618), KQU( 476504300460286050),
786 KQU(16465470901027093441), KQU( 4047724406247136402),
787 KQU( 1322305451411883346), KQU( 1388308688834322280),
788 KQU( 7303989085269758176), KQU( 9323792664765233642),
789 KQU( 4542762575316368936), KQU(17342696132794337618),
790 KQU( 4588025054768498379), KQU(13415475057390330804),
791 KQU(17880279491733405570), KQU(10610553400618620353),
792 KQU( 3180842072658960139), KQU(13002966655454270120),
793 KQU( 1665301181064982826), KQU( 7083673946791258979),
794 KQU( 190522247122496820), KQU(17388280237250677740),
795 KQU( 8430770379923642945), KQU(12987180971921668584),
796 KQU( 2311086108365390642), KQU( 2870984383579822345),
797 KQU(14014682609164653318), KQU(14467187293062251484),
798 KQU( 192186361147413298), KQU(15171951713531796524),
799 KQU( 9900305495015948728), KQU(17958004775615466344),
800 KQU(14346380954498606514), KQU(18040047357617407096),
801 KQU( 5035237584833424532), KQU(15089555460613972287),
802 KQU( 4131411873749729831), KQU( 1329013581168250330),
803 KQU(10095353333051193949), KQU(10749518561022462716),
804 KQU( 9050611429810755847), KQU(15022028840236655649),
805 KQU( 8775554279239748298), KQU(13105754025489230502),
806 KQU(15471300118574167585), KQU( 89864764002355628),
807 KQU( 8776416323420466637), KQU( 5280258630612040891),
808 KQU( 2719174488591862912), KQU( 7599309137399661994),
809 KQU(15012887256778039979), KQU(14062981725630928925),
810 KQU(12038536286991689603), KQU( 7089756544681775245),
811 KQU(10376661532744718039), KQU( 1265198725901533130),
812 KQU(13807996727081142408), KQU( 2935019626765036403),
813 KQU( 7651672460680700141), KQU( 3644093016200370795),
814 KQU( 2840982578090080674), KQU(17956262740157449201),
815 KQU(18267979450492880548), KQU(11799503659796848070),
816 KQU( 9942537025669672388), KQU(11886606816406990297),
817 KQU( 5488594946437447576), KQU( 7226714353282744302),
818 KQU( 3784851653123877043), KQU( 878018453244803041),
819 KQU(12110022586268616085), KQU( 734072179404675123),
820 KQU(11869573627998248542), KQU( 469150421297783998),
821 KQU( 260151124912803804), KQU(11639179410120968649),
822 KQU( 9318165193840846253), KQU(12795671722734758075),
823 KQU(15318410297267253933), KQU( 691524703570062620),
824 KQU( 5837129010576994601), KQU(15045963859726941052),
825 KQU( 5850056944932238169), KQU(12017434144750943807),
826 KQU( 7447139064928956574), KQU( 3101711812658245019),
827 KQU(16052940704474982954), KQU(18195745945986994042),
828 KQU( 8932252132785575659), KQU(13390817488106794834),
829 KQU(11582771836502517453), KQU( 4964411326683611686),
830 KQU( 2195093981702694011), KQU(14145229538389675669),
831 KQU(16459605532062271798), KQU( 866316924816482864),
832 KQU( 4593041209937286377), KQU( 8415491391910972138),
833 KQU( 4171236715600528969), KQU(16637569303336782889),
834 KQU( 2002011073439212680), KQU(17695124661097601411),
835 KQU( 4627687053598611702), KQU( 7895831936020190403),
836 KQU( 8455951300917267802), KQU( 2923861649108534854),
837 KQU( 8344557563927786255), KQU( 6408671940373352556),
838 KQU(12210227354536675772), KQU(14294804157294222295),
839 KQU(10103022425071085127), KQU(10092959489504123771),
840 KQU( 6554774405376736268), KQU(12629917718410641774),
841 KQU( 6260933257596067126), KQU( 2460827021439369673),
842 KQU( 2541962996717103668), KQU( 597377203127351475),
843 KQU( 5316984203117315309), KQU( 4811211393563241961),
844 KQU(13119698597255811641), KQU( 8048691512862388981),
845 KQU(10216818971194073842), KQU( 4612229970165291764),
846 KQU(10000980798419974770), KQU( 6877640812402540687),
847 KQU( 1488727563290436992), KQU( 2227774069895697318),
848 KQU(11237754507523316593), KQU(13478948605382290972),
849 KQU( 1963583846976858124), KQU( 5512309205269276457),
850 KQU( 3972770164717652347), KQU( 3841751276198975037),
851 KQU(10283343042181903117), KQU( 8564001259792872199),
852 KQU(16472187244722489221), KQU( 8953493499268945921),
853 KQU( 3518747340357279580), KQU( 4003157546223963073),
854 KQU( 3270305958289814590), KQU( 3966704458129482496),
855 KQU( 8122141865926661939), KQU(14627734748099506653),
856 KQU(13064426990862560568), KQU( 2414079187889870829),
857 KQU( 5378461209354225306), KQU(10841985740128255566),
858 KQU( 538582442885401738), KQU( 7535089183482905946),
859 KQU(16117559957598879095), KQU( 8477890721414539741),
860 KQU( 1459127491209533386), KQU(17035126360733620462),
861 KQU( 8517668552872379126), KQU(10292151468337355014),
862 KQU(17081267732745344157), KQU(13751455337946087178),
863 KQU(14026945459523832966), KQU( 6653278775061723516),
864 KQU(10619085543856390441), KQU( 2196343631481122885),
865 KQU(10045966074702826136), KQU(10082317330452718282),
866 KQU( 5920859259504831242), KQU( 9951879073426540617),
867 KQU( 7074696649151414158), KQU(15808193543879464318),
868 KQU( 7385247772746953374), KQU( 3192003544283864292),
869 KQU(18153684490917593847), KQU(12423498260668568905),
870 KQU(10957758099756378169), KQU(11488762179911016040),
871 KQU( 2099931186465333782), KQU(11180979581250294432),
872 KQU( 8098916250668367933), KQU( 3529200436790763465),
873 KQU(12988418908674681745), KQU( 6147567275954808580),
874 KQU( 3207503344604030989), KQU(10761592604898615360),
875 KQU( 229854861031893504), KQU( 8809853962667144291),
876 KQU(13957364469005693860), KQU( 7634287665224495886),
877 KQU(12353487366976556874), KQU( 1134423796317152034),
878 KQU( 2088992471334107068), KQU( 7393372127190799698),
879 KQU( 1845367839871058391), KQU( 207922563987322884),
880 KQU(11960870813159944976), KQU(12182120053317317363),
881 KQU(17307358132571709283), KQU(13871081155552824936),
882 KQU(18304446751741566262), KQU( 7178705220184302849),
883 KQU(10929605677758824425), KQU(16446976977835806844),
884 KQU(13723874412159769044), KQU( 6942854352100915216),
885 KQU( 1726308474365729390), KQU( 2150078766445323155),
886 KQU(15345558947919656626), KQU(12145453828874527201),
887 KQU( 2054448620739726849), KQU( 2740102003352628137),
888 KQU(11294462163577610655), KQU( 756164283387413743),
889 KQU(17841144758438810880), KQU(10802406021185415861),
890 KQU( 8716455530476737846), KQU( 6321788834517649606),
891 KQU(14681322910577468426), KQU(17330043563884336387),
892 KQU(12701802180050071614), KQU(14695105111079727151),
893 KQU( 5112098511654172830), KQU( 4957505496794139973),
894 KQU( 8270979451952045982), KQU(12307685939199120969),
895 KQU(12425799408953443032), KQU( 8376410143634796588),
896 KQU(16621778679680060464), KQU( 3580497854566660073),
897 KQU( 1122515747803382416), KQU( 857664980960597599),
898 KQU( 6343640119895925918), KQU(12878473260854462891),
899 KQU(10036813920765722626), KQU(14451335468363173812),
900 KQU( 5476809692401102807), KQU(16442255173514366342),
901 KQU(13060203194757167104), KQU(14354124071243177715),
902 KQU(15961249405696125227), KQU(13703893649690872584),
903 KQU( 363907326340340064), KQU( 6247455540491754842),
904 KQU(12242249332757832361), KQU( 156065475679796717),
905 KQU( 9351116235749732355), KQU( 4590350628677701405),
906 KQU( 1671195940982350389), KQU(13501398458898451905),
907 KQU( 6526341991225002255), KQU( 1689782913778157592),
908 KQU( 7439222350869010334), KQU(13975150263226478308),
909 KQU(11411961169932682710), KQU(17204271834833847277),
910 KQU( 541534742544435367), KQU( 6591191931218949684),
911 KQU( 2645454775478232486), KQU( 4322857481256485321),
912 KQU( 8477416487553065110), KQU(12902505428548435048),
913 KQU( 971445777981341415), KQU(14995104682744976712),
914 KQU( 4243341648807158063), KQU( 8695061252721927661),
915 KQU( 5028202003270177222), KQU( 2289257340915567840),
916 KQU(13870416345121866007), KQU(13994481698072092233),
917 KQU( 6912785400753196481), KQU( 2278309315841980139),
918 KQU( 4329765449648304839), KQU( 5963108095785485298),
919 KQU( 4880024847478722478), KQU(16015608779890240947),
920 KQU( 1866679034261393544), KQU( 914821179919731519),
921 KQU( 9643404035648760131), KQU( 2418114953615593915),
922 KQU( 944756836073702374), KQU(15186388048737296834),
923 KQU( 7723355336128442206), KQU( 7500747479679599691),
924 KQU(18013961306453293634), KQU( 2315274808095756456),
925 KQU(13655308255424029566), KQU(17203800273561677098),
926 KQU( 1382158694422087756), KQU( 5090390250309588976),
927 KQU( 517170818384213989), KQU( 1612709252627729621),
928 KQU( 1330118955572449606), KQU( 300922478056709885),
929 KQU(18115693291289091987), KQU(13491407109725238321),
930 KQU(15293714633593827320), KQU( 5151539373053314504),
931 KQU( 5951523243743139207), KQU(14459112015249527975),
932 KQU( 5456113959000700739), KQU( 3877918438464873016),
933 KQU(12534071654260163555), KQU(15871678376893555041),
934 KQU(11005484805712025549), KQU(16353066973143374252),
935 KQU( 4358331472063256685), KQU( 8268349332210859288),
936 KQU(12485161590939658075), KQU(13955993592854471343),
937 KQU( 5911446886848367039), KQU(14925834086813706974),
938 KQU( 6590362597857994805), KQU( 1280544923533661875),
939 KQU( 1637756018947988164), KQU( 4734090064512686329),
940 KQU(16693705263131485912), KQU( 6834882340494360958),
941 KQU( 8120732176159658505), KQU( 2244371958905329346),
942 KQU(10447499707729734021), KQU( 7318742361446942194),
943 KQU( 8032857516355555296), KQU(14023605983059313116),
944 KQU( 1032336061815461376), KQU( 9840995337876562612),
945 KQU( 9869256223029203587), KQU(12227975697177267636),
946 KQU(12728115115844186033), KQU( 7752058479783205470),
947 KQU( 729733219713393087), KQU(12954017801239007622)
948};
949static const uint64_t init_by_array_64_expected[] = {
950 KQU( 2100341266307895239), KQU( 8344256300489757943),
951 KQU(15687933285484243894), KQU( 8268620370277076319),
952 KQU(12371852309826545459), KQU( 8800491541730110238),
953 KQU(18113268950100835773), KQU( 2886823658884438119),
954 KQU( 3293667307248180724), KQU( 9307928143300172731),
955 KQU( 7688082017574293629), KQU( 900986224735166665),
956 KQU( 9977972710722265039), KQU( 6008205004994830552),
957 KQU( 546909104521689292), KQU( 7428471521869107594),
958 KQU(14777563419314721179), KQU(16116143076567350053),
959 KQU( 5322685342003142329), KQU( 4200427048445863473),
960 KQU( 4693092150132559146), KQU(13671425863759338582),
961 KQU( 6747117460737639916), KQU( 4732666080236551150),
962 KQU( 5912839950611941263), KQU( 3903717554504704909),
963 KQU( 2615667650256786818), KQU(10844129913887006352),
964 KQU(13786467861810997820), KQU(14267853002994021570),
965 KQU(13767807302847237439), KQU(16407963253707224617),
966 KQU( 4802498363698583497), KQU( 2523802839317209764),
967 KQU( 3822579397797475589), KQU( 8950320572212130610),
968 KQU( 3745623504978342534), KQU(16092609066068482806),
969 KQU( 9817016950274642398), KQU(10591660660323829098),
970 KQU(11751606650792815920), KQU( 5122873818577122211),
971 KQU(17209553764913936624), KQU( 6249057709284380343),
972 KQU(15088791264695071830), KQU(15344673071709851930),
973 KQU( 4345751415293646084), KQU( 2542865750703067928),
974 KQU(13520525127852368784), KQU(18294188662880997241),
975 KQU( 3871781938044881523), KQU( 2873487268122812184),
976 KQU(15099676759482679005), KQU(15442599127239350490),
977 KQU( 6311893274367710888), KQU( 3286118760484672933),
978 KQU( 4146067961333542189), KQU(13303942567897208770),
979 KQU( 8196013722255630418), KQU( 4437815439340979989),
980 KQU(15433791533450605135), KQU( 4254828956815687049),
981 KQU( 1310903207708286015), KQU(10529182764462398549),
982 KQU(14900231311660638810), KQU( 9727017277104609793),
983 KQU( 1821308310948199033), KQU(11628861435066772084),
984 KQU( 9469019138491546924), KQU( 3145812670532604988),
985 KQU( 9938468915045491919), KQU( 1562447430672662142),
986 KQU(13963995266697989134), KQU( 3356884357625028695),
987 KQU( 4499850304584309747), KQU( 8456825817023658122),
988 KQU(10859039922814285279), KQU( 8099512337972526555),
989 KQU( 348006375109672149), KQU(11919893998241688603),
990 KQU( 1104199577402948826), KQU(16689191854356060289),
991 KQU(10992552041730168078), KQU( 7243733172705465836),
992 KQU( 5668075606180319560), KQU(18182847037333286970),
993 KQU( 4290215357664631322), KQU( 4061414220791828613),
994 KQU(13006291061652989604), KQU( 7140491178917128798),
995 KQU(12703446217663283481), KQU( 5500220597564558267),
996 KQU(10330551509971296358), KQU(15958554768648714492),
997 KQU( 5174555954515360045), KQU( 1731318837687577735),
998 KQU( 3557700801048354857), KQU(13764012341928616198),
999 KQU(13115166194379119043), KQU( 7989321021560255519),
1000 KQU( 2103584280905877040), KQU( 9230788662155228488),
1001 KQU(16396629323325547654), KQU( 657926409811318051),
1002 KQU(15046700264391400727), KQU( 5120132858771880830),
1003 KQU( 7934160097989028561), KQU( 6963121488531976245),
1004 KQU(17412329602621742089), KQU(15144843053931774092),
1005 KQU(17204176651763054532), KQU(13166595387554065870),
1006 KQU( 8590377810513960213), KQU( 5834365135373991938),
1007 KQU( 7640913007182226243), KQU( 3479394703859418425),
1008 KQU(16402784452644521040), KQU( 4993979809687083980),
1009 KQU(13254522168097688865), KQU(15643659095244365219),
1010 KQU( 5881437660538424982), KQU(11174892200618987379),
1011 KQU( 254409966159711077), KQU(17158413043140549909),
1012 KQU( 3638048789290376272), KQU( 1376816930299489190),
1013 KQU( 4622462095217761923), KQU(15086407973010263515),
1014 KQU(13253971772784692238), KQU( 5270549043541649236),
1015 KQU(11182714186805411604), KQU(12283846437495577140),
1016 KQU( 5297647149908953219), KQU(10047451738316836654),
1017 KQU( 4938228100367874746), KQU(12328523025304077923),
1018 KQU( 3601049438595312361), KQU( 9313624118352733770),
1019 KQU(13322966086117661798), KQU(16660005705644029394),
1020 KQU(11337677526988872373), KQU(13869299102574417795),
1021 KQU(15642043183045645437), KQU( 3021755569085880019),
1022 KQU( 4979741767761188161), KQU(13679979092079279587),
1023 KQU( 3344685842861071743), KQU(13947960059899588104),
1024 KQU( 305806934293368007), KQU( 5749173929201650029),
1025 KQU(11123724852118844098), KQU(15128987688788879802),
1026 KQU(15251651211024665009), KQU( 7689925933816577776),
1027 KQU(16732804392695859449), KQU(17087345401014078468),
1028 KQU(14315108589159048871), KQU( 4820700266619778917),
1029 KQU(16709637539357958441), KQU( 4936227875177351374),
1030 KQU( 2137907697912987247), KQU(11628565601408395420),
1031 KQU( 2333250549241556786), KQU( 5711200379577778637),
1032 KQU( 5170680131529031729), KQU(12620392043061335164),
1033 KQU( 95363390101096078), KQU( 5487981914081709462),
1034 KQU( 1763109823981838620), KQU( 3395861271473224396),
1035 KQU( 1300496844282213595), KQU( 6894316212820232902),
1036 KQU(10673859651135576674), KQU( 5911839658857903252),
1037 KQU(17407110743387299102), KQU( 8257427154623140385),
1038 KQU(11389003026741800267), KQU( 4070043211095013717),
1039 KQU(11663806997145259025), KQU(15265598950648798210),
1040 KQU( 630585789434030934), KQU( 3524446529213587334),
1041 KQU( 7186424168495184211), KQU(10806585451386379021),
1042 KQU(11120017753500499273), KQU( 1586837651387701301),
1043 KQU(17530454400954415544), KQU( 9991670045077880430),
1044 KQU( 7550997268990730180), KQU( 8640249196597379304),
1045 KQU( 3522203892786893823), KQU(10401116549878854788),
1046 KQU(13690285544733124852), KQU( 8295785675455774586),
1047 KQU(15535716172155117603), KQU( 3112108583723722511),
1048 KQU(17633179955339271113), KQU(18154208056063759375),
1049 KQU( 1866409236285815666), KQU(13326075895396412882),
1050 KQU( 8756261842948020025), KQU( 6281852999868439131),
1051 KQU(15087653361275292858), KQU(10333923911152949397),
1052 KQU( 5265567645757408500), KQU(12728041843210352184),
1053 KQU( 6347959327507828759), KQU( 154112802625564758),
1054 KQU(18235228308679780218), KQU( 3253805274673352418),
1055 KQU( 4849171610689031197), KQU(17948529398340432518),
1056 KQU(13803510475637409167), KQU(13506570190409883095),
1057 KQU(15870801273282960805), KQU( 8451286481299170773),
1058 KQU( 9562190620034457541), KQU( 8518905387449138364),
1059 KQU(12681306401363385655), KQU( 3788073690559762558),
1060 KQU( 5256820289573487769), KQU( 2752021372314875467),
1061 KQU( 6354035166862520716), KQU( 4328956378309739069),
1062 KQU( 449087441228269600), KQU( 5533508742653090868),
1063 KQU( 1260389420404746988), KQU(18175394473289055097),
1064 KQU( 1535467109660399420), KQU( 8818894282874061442),
1065 KQU(12140873243824811213), KQU(15031386653823014946),
1066 KQU( 1286028221456149232), KQU( 6329608889367858784),
1067 KQU( 9419654354945132725), KQU( 6094576547061672379),
1068 KQU(17706217251847450255), KQU( 1733495073065878126),
1069 KQU(16918923754607552663), KQU( 8881949849954945044),
1070 KQU(12938977706896313891), KQU(14043628638299793407),
1071 KQU(18393874581723718233), KQU( 6886318534846892044),
1072 KQU(14577870878038334081), KQU(13541558383439414119),
1073 KQU(13570472158807588273), KQU(18300760537910283361),
1074 KQU( 818368572800609205), KQU( 1417000585112573219),
1075 KQU(12337533143867683655), KQU(12433180994702314480),
1076 KQU( 778190005829189083), KQU(13667356216206524711),
1077 KQU( 9866149895295225230), KQU(11043240490417111999),
1078 KQU( 1123933826541378598), KQU( 6469631933605123610),
1079 KQU(14508554074431980040), KQU(13918931242962026714),
1080 KQU( 2870785929342348285), KQU(14786362626740736974),
1081 KQU(13176680060902695786), KQU( 9591778613541679456),
1082 KQU( 9097662885117436706), KQU( 749262234240924947),
1083 KQU( 1944844067793307093), KQU( 4339214904577487742),
1084 KQU( 8009584152961946551), KQU(16073159501225501777),
1085 KQU( 3335870590499306217), KQU(17088312653151202847),
1086 KQU( 3108893142681931848), KQU(16636841767202792021),
1087 KQU(10423316431118400637), KQU( 8008357368674443506),
1088 KQU(11340015231914677875), KQU(17687896501594936090),
1089 KQU(15173627921763199958), KQU( 542569482243721959),
1090 KQU(15071714982769812975), KQU( 4466624872151386956),
1091 KQU( 1901780715602332461), KQU( 9822227742154351098),
1092 KQU( 1479332892928648780), KQU( 6981611948382474400),
1093 KQU( 7620824924456077376), KQU(14095973329429406782),
1094 KQU( 7902744005696185404), KQU(15830577219375036920),
1095 KQU(10287076667317764416), KQU(12334872764071724025),
1096 KQU( 4419302088133544331), KQU(14455842851266090520),
1097 KQU(12488077416504654222), KQU( 7953892017701886766),
1098 KQU( 6331484925529519007), KQU( 4902145853785030022),
1099 KQU(17010159216096443073), KQU(11945354668653886087),
1100 KQU(15112022728645230829), KQU(17363484484522986742),
1101 KQU( 4423497825896692887), KQU( 8155489510809067471),
1102 KQU( 258966605622576285), KQU( 5462958075742020534),
1103 KQU( 6763710214913276228), KQU( 2368935183451109054),
1104 KQU(14209506165246453811), KQU( 2646257040978514881),
1105 KQU( 3776001911922207672), KQU( 1419304601390147631),
1106 KQU(14987366598022458284), KQU( 3977770701065815721),
1107 KQU( 730820417451838898), KQU( 3982991703612885327),
1108 KQU( 2803544519671388477), KQU(17067667221114424649),
1109 KQU( 2922555119737867166), KQU( 1989477584121460932),
1110 KQU(15020387605892337354), KQU( 9293277796427533547),
1111 KQU(10722181424063557247), KQU(16704542332047511651),
1112 KQU( 5008286236142089514), KQU(16174732308747382540),
1113 KQU(17597019485798338402), KQU(13081745199110622093),
1114 KQU( 8850305883842258115), KQU(12723629125624589005),
1115 KQU( 8140566453402805978), KQU(15356684607680935061),
1116 KQU(14222190387342648650), KQU(11134610460665975178),
1117 KQU( 1259799058620984266), KQU(13281656268025610041),
1118 KQU( 298262561068153992), KQU(12277871700239212922),
1119 KQU(13911297774719779438), KQU(16556727962761474934),
1120 KQU(17903010316654728010), KQU( 9682617699648434744),
1121 KQU(14757681836838592850), KQU( 1327242446558524473),
1122 KQU(11126645098780572792), KQU( 1883602329313221774),
1123 KQU( 2543897783922776873), KQU(15029168513767772842),
1124 KQU(12710270651039129878), KQU(16118202956069604504),
1125 KQU(15010759372168680524), KQU( 2296827082251923948),
1126 KQU(10793729742623518101), KQU(13829764151845413046),
1127 KQU(17769301223184451213), KQU( 3118268169210783372),
1128 KQU(17626204544105123127), KQU( 7416718488974352644),
1129 KQU(10450751996212925994), KQU( 9352529519128770586),
1130 KQU( 259347569641110140), KQU( 8048588892269692697),
1131 KQU( 1774414152306494058), KQU(10669548347214355622),
1132 KQU(13061992253816795081), KQU(18432677803063861659),
1133 KQU( 8879191055593984333), KQU(12433753195199268041),
1134 KQU(14919392415439730602), KQU( 6612848378595332963),
1135 KQU( 6320986812036143628), KQU(10465592420226092859),
1136 KQU( 4196009278962570808), KQU( 3747816564473572224),
1137 KQU(17941203486133732898), KQU( 2350310037040505198),
1138 KQU( 5811779859134370113), KQU(10492109599506195126),
1139 KQU( 7699650690179541274), KQU( 1954338494306022961),
1140 KQU(14095816969027231152), KQU( 5841346919964852061),
1141 KQU(14945969510148214735), KQU( 3680200305887550992),
1142 KQU( 6218047466131695792), KQU( 8242165745175775096),
1143 KQU(11021371934053307357), KQU( 1265099502753169797),
1144 KQU( 4644347436111321718), KQU( 3609296916782832859),
1145 KQU( 8109807992218521571), KQU(18387884215648662020),
1146 KQU(14656324896296392902), KQU(17386819091238216751),
1147 KQU(17788300878582317152), KQU( 7919446259742399591),
1148 KQU( 4466613134576358004), KQU(12928181023667938509),
1149 KQU(13147446154454932030), KQU(16552129038252734620),
1150 KQU( 8395299403738822450), KQU(11313817655275361164),
1151 KQU( 434258809499511718), KQU( 2074882104954788676),
1152 KQU( 7929892178759395518), KQU( 9006461629105745388),
1153 KQU( 5176475650000323086), KQU(11128357033468341069),
1154 KQU(12026158851559118955), KQU(14699716249471156500),
1155 KQU( 448982497120206757), KQU( 4156475356685519900),
1156 KQU( 6063816103417215727), KQU(10073289387954971479),
1157 KQU( 8174466846138590962), KQU( 2675777452363449006),
1158 KQU( 9090685420572474281), KQU( 6659652652765562060),
1159 KQU(12923120304018106621), KQU(11117480560334526775),
1160 KQU( 937910473424587511), KQU( 1838692113502346645),
1161 KQU(11133914074648726180), KQU( 7922600945143884053),
1162 KQU(13435287702700959550), KQU( 5287964921251123332),
1163 KQU(11354875374575318947), KQU(17955724760748238133),
1164 KQU(13728617396297106512), KQU( 4107449660118101255),
1165 KQU( 1210269794886589623), KQU(11408687205733456282),
1166 KQU( 4538354710392677887), KQU(13566803319341319267),
1167 KQU(17870798107734050771), KQU( 3354318982568089135),
1168 KQU( 9034450839405133651), KQU(13087431795753424314),
1169 KQU( 950333102820688239), KQU( 1968360654535604116),
1170 KQU(16840551645563314995), KQU( 8867501803892924995),
1171 KQU(11395388644490626845), KQU( 1529815836300732204),
1172 KQU(13330848522996608842), KQU( 1813432878817504265),
1173 KQU( 2336867432693429560), KQU(15192805445973385902),
1174 KQU( 2528593071076407877), KQU( 128459777936689248),
1175 KQU( 9976345382867214866), KQU( 6208885766767996043),
1176 KQU(14982349522273141706), KQU( 3099654362410737822),
1177 KQU(13776700761947297661), KQU( 8806185470684925550),
1178 KQU( 8151717890410585321), KQU( 640860591588072925),
1179 KQU(14592096303937307465), KQU( 9056472419613564846),
1180 KQU(14861544647742266352), KQU(12703771500398470216),
1181 KQU( 3142372800384138465), KQU( 6201105606917248196),
1182 KQU(18337516409359270184), KQU(15042268695665115339),
1183 KQU(15188246541383283846), KQU(12800028693090114519),
1184 KQU( 5992859621101493472), KQU(18278043971816803521),
1185 KQU( 9002773075219424560), KQU( 7325707116943598353),
1186 KQU( 7930571931248040822), KQU( 5645275869617023448),
1187 KQU( 7266107455295958487), KQU( 4363664528273524411),
1188 KQU(14313875763787479809), KQU(17059695613553486802),
1189 KQU( 9247761425889940932), KQU(13704726459237593128),
1190 KQU( 2701312427328909832), KQU(17235532008287243115),
1191 KQU(14093147761491729538), KQU( 6247352273768386516),
1192 KQU( 8268710048153268415), KQU( 7985295214477182083),
1193 KQU(15624495190888896807), KQU( 3772753430045262788),
1194 KQU( 9133991620474991698), KQU( 5665791943316256028),
1195 KQU( 7551996832462193473), KQU(13163729206798953877),
1196 KQU( 9263532074153846374), KQU( 1015460703698618353),
1197 KQU(17929874696989519390), KQU(18257884721466153847),
1198 KQU(16271867543011222991), KQU( 3905971519021791941),
1199 KQU(16814488397137052085), KQU( 1321197685504621613),
1200 KQU( 2870359191894002181), KQU(14317282970323395450),
1201 KQU(13663920845511074366), KQU( 2052463995796539594),
1202 KQU(14126345686431444337), KQU( 1727572121947022534),
1203 KQU(17793552254485594241), KQU( 6738857418849205750),
1204 KQU( 1282987123157442952), KQU(16655480021581159251),
1205 KQU( 6784587032080183866), KQU(14726758805359965162),
1206 KQU( 7577995933961987349), KQU(12539609320311114036),
1207 KQU(10789773033385439494), KQU( 8517001497411158227),
1208 KQU(10075543932136339710), KQU(14838152340938811081),
1209 KQU( 9560840631794044194), KQU(17445736541454117475),
1210 KQU(10633026464336393186), KQU(15705729708242246293),
1211 KQU( 1117517596891411098), KQU( 4305657943415886942),
1212 KQU( 4948856840533979263), KQU(16071681989041789593),
1213 KQU(13723031429272486527), KQU( 7639567622306509462),
1214 KQU(12670424537483090390), KQU( 9715223453097197134),
1215 KQU( 5457173389992686394), KQU( 289857129276135145),
1216 KQU(17048610270521972512), KQU( 692768013309835485),
1217 KQU(14823232360546632057), KQU(18218002361317895936),
1218 KQU( 3281724260212650204), KQU(16453957266549513795),
1219 KQU( 8592711109774511881), KQU( 929825123473369579),
1220 KQU(15966784769764367791), KQU( 9627344291450607588),
1221 KQU(10849555504977813287), KQU( 9234566913936339275),
1222 KQU( 6413807690366911210), KQU(10862389016184219267),
1223 KQU(13842504799335374048), KQU( 1531994113376881174),
1224 KQU( 2081314867544364459), KQU(16430628791616959932),
1225 KQU( 8314714038654394368), KQU( 9155473892098431813),
1226 KQU(12577843786670475704), KQU( 4399161106452401017),
1227 KQU( 1668083091682623186), KQU( 1741383777203714216),
1228 KQU( 2162597285417794374), KQU(15841980159165218736),
1229 KQU( 1971354603551467079), KQU( 1206714764913205968),
1230 KQU( 4790860439591272330), KQU(14699375615594055799),
1231 KQU( 8374423871657449988), KQU(10950685736472937738),
1232 KQU( 697344331343267176), KQU(10084998763118059810),
1233 KQU(12897369539795983124), KQU(12351260292144383605),
1234 KQU( 1268810970176811234), KQU( 7406287800414582768),
1235 KQU( 516169557043807831), KQU( 5077568278710520380),
1236 KQU( 3828791738309039304), KQU( 7721974069946943610),
1237 KQU( 3534670260981096460), KQU( 4865792189600584891),
1238 KQU(16892578493734337298), KQU( 9161499464278042590),
1239 KQU(11976149624067055931), KQU(13219479887277343990),
1240 KQU(14161556738111500680), KQU(14670715255011223056),
1241 KQU( 4671205678403576558), KQU(12633022931454259781),
1242 KQU(14821376219869187646), KQU( 751181776484317028),
1243 KQU( 2192211308839047070), KQU(11787306362361245189),
1244 KQU(10672375120744095707), KQU( 4601972328345244467),
1245 KQU(15457217788831125879), KQU( 8464345256775460809),
1246 KQU(10191938789487159478), KQU( 6184348739615197613),
1247 KQU(11425436778806882100), KQU( 2739227089124319793),
1248 KQU( 461464518456000551), KQU( 4689850170029177442),
1249 KQU( 6120307814374078625), KQU(11153579230681708671),
1250 KQU( 7891721473905347926), KQU(10281646937824872400),
1251 KQU( 3026099648191332248), KQU( 8666750296953273818),
1252 KQU(14978499698844363232), KQU(13303395102890132065),
1253 KQU( 8182358205292864080), KQU(10560547713972971291),
1254 KQU(11981635489418959093), KQU( 3134621354935288409),
1255 KQU(11580681977404383968), KQU(14205530317404088650),
1256 KQU( 5997789011854923157), KQU(13659151593432238041),
1257 KQU(11664332114338865086), KQU( 7490351383220929386),
1258 KQU( 7189290499881530378), KQU(15039262734271020220),
1259 KQU( 2057217285976980055), KQU( 555570804905355739),
1260 KQU(11235311968348555110), KQU(13824557146269603217),
1261 KQU(16906788840653099693), KQU( 7222878245455661677),
1262 KQU( 5245139444332423756), KQU( 4723748462805674292),
1263 KQU(12216509815698568612), KQU(17402362976648951187),
1264 KQU(17389614836810366768), KQU( 4880936484146667711),
1265 KQU( 9085007839292639880), KQU(13837353458498535449),
1266 KQU(11914419854360366677), KQU(16595890135313864103),
1267 KQU( 6313969847197627222), KQU(18296909792163910431),
1268 KQU(10041780113382084042), KQU( 2499478551172884794),
1269 KQU(11057894246241189489), KQU( 9742243032389068555),
1270 KQU(12838934582673196228), KQU(13437023235248490367),
1271 KQU(13372420669446163240), KQU( 6752564244716909224),
1272 KQU( 7157333073400313737), KQU(12230281516370654308),
1273 KQU( 1182884552219419117), KQU( 2955125381312499218),
1274 KQU(10308827097079443249), KQU( 1337648572986534958),
1275 KQU(16378788590020343939), KQU( 108619126514420935),
1276 KQU( 3990981009621629188), KQU( 5460953070230946410),
1277 KQU( 9703328329366531883), KQU(13166631489188077236),
1278 KQU( 1104768831213675170), KQU( 3447930458553877908),
1279 KQU( 8067172487769945676), KQU( 5445802098190775347),
1280 KQU( 3244840981648973873), KQU(17314668322981950060),
1281 KQU( 5006812527827763807), KQU(18158695070225526260),
1282 KQU( 2824536478852417853), KQU(13974775809127519886),
1283 KQU( 9814362769074067392), KQU(17276205156374862128),
1284 KQU(11361680725379306967), KQU( 3422581970382012542),
1285 KQU(11003189603753241266), KQU(11194292945277862261),
1286 KQU( 6839623313908521348), KQU(11935326462707324634),
1287 KQU( 1611456788685878444), KQU(13112620989475558907),
1288 KQU( 517659108904450427), KQU(13558114318574407624),
1289 KQU(15699089742731633077), KQU( 4988979278862685458),
1290 KQU( 8111373583056521297), KQU( 3891258746615399627),
1291 KQU( 8137298251469718086), KQU(12748663295624701649),
1292 KQU( 4389835683495292062), KQU( 5775217872128831729),
1293 KQU( 9462091896405534927), KQU( 8498124108820263989),
1294 KQU( 8059131278842839525), KQU(10503167994254090892),
1295 KQU(11613153541070396656), KQU(18069248738504647790),
1296 KQU( 570657419109768508), KQU( 3950574167771159665),
1297 KQU( 5514655599604313077), KQU( 2908460854428484165),
1298 KQU(10777722615935663114), KQU(12007363304839279486),
1299 KQU( 9800646187569484767), KQU( 8795423564889864287),
1300 KQU(14257396680131028419), KQU( 6405465117315096498),
1301 KQU( 7939411072208774878), KQU(17577572378528990006),
1302 KQU(14785873806715994850), KQU(16770572680854747390),
1303 KQU(18127549474419396481), KQU(11637013449455757750),
1304 KQU(14371851933996761086), KQU( 3601181063650110280),
1305 KQU( 4126442845019316144), KQU(10198287239244320669),
1306 KQU(18000169628555379659), KQU(18392482400739978269),
1307 KQU( 6219919037686919957), KQU( 3610085377719446052),
1308 KQU( 2513925039981776336), KQU(16679413537926716955),
1309 KQU(12903302131714909434), KQU( 5581145789762985009),
1310 KQU(12325955044293303233), KQU(17216111180742141204),
1311 KQU( 6321919595276545740), KQU( 3507521147216174501),
1312 KQU( 9659194593319481840), KQU(11473976005975358326),
1313 KQU(14742730101435987026), KQU( 492845897709954780),
1314 KQU(16976371186162599676), KQU(17712703422837648655),
1315 KQU( 9881254778587061697), KQU( 8413223156302299551),
1316 KQU( 1563841828254089168), KQU( 9996032758786671975),
1317 KQU( 138877700583772667), KQU(13003043368574995989),
1318 KQU( 4390573668650456587), KQU( 8610287390568126755),
1319 KQU(15126904974266642199), KQU( 6703637238986057662),
1320 KQU( 2873075592956810157), KQU( 6035080933946049418),
1321 KQU(13382846581202353014), KQU( 7303971031814642463),
1322 KQU(18418024405307444267), KQU( 5847096731675404647),
1323 KQU( 4035880699639842500), KQU(11525348625112218478),
1324 KQU( 3041162365459574102), KQU( 2604734487727986558),
1325 KQU(15526341771636983145), KQU(14556052310697370254),
1326 KQU(12997787077930808155), KQU( 9601806501755554499),
1327 KQU(11349677952521423389), KQU(14956777807644899350),
1328 KQU(16559736957742852721), KQU(12360828274778140726),
1329 KQU( 6685373272009662513), KQU(16932258748055324130),
1330 KQU(15918051131954158508), KQU( 1692312913140790144),
1331 KQU( 546653826801637367), KQU( 5341587076045986652),
1332 KQU(14975057236342585662), KQU(12374976357340622412),
1333 KQU(10328833995181940552), KQU(12831807101710443149),
1334 KQU(10548514914382545716), KQU( 2217806727199715993),
1335 KQU(12627067369242845138), KQU( 4598965364035438158),
1336 KQU( 150923352751318171), KQU(14274109544442257283),
1337 KQU( 4696661475093863031), KQU( 1505764114384654516),
1338 KQU(10699185831891495147), KQU( 2392353847713620519),
1339 KQU( 3652870166711788383), KQU( 8640653276221911108),
1340 KQU( 3894077592275889704), KQU( 4918592872135964845),
1341 KQU(16379121273281400789), KQU(12058465483591683656),
1342 KQU(11250106829302924945), KQU( 1147537556296983005),
1343 KQU( 6376342756004613268), KQU(14967128191709280506),
1344 KQU(18007449949790627628), KQU( 9497178279316537841),
1345 KQU( 7920174844809394893), KQU(10037752595255719907),
1346 KQU(15875342784985217697), KQU(15311615921712850696),
1347 KQU( 9552902652110992950), KQU(14054979450099721140),
1348 KQU( 5998709773566417349), KQU(18027910339276320187),
1349 KQU( 8223099053868585554), KQU( 7842270354824999767),
1350 KQU( 4896315688770080292), KQU(12969320296569787895),
1351 KQU( 2674321489185759961), KQU( 4053615936864718439),
1352 KQU(11349775270588617578), KQU( 4743019256284553975),
1353 KQU( 5602100217469723769), KQU(14398995691411527813),
1354 KQU( 7412170493796825470), KQU( 836262406131744846),
1355 KQU( 8231086633845153022), KQU( 5161377920438552287),
1356 KQU( 8828731196169924949), KQU(16211142246465502680),
1357 KQU( 3307990879253687818), KQU( 5193405406899782022),
1358 KQU( 8510842117467566693), KQU( 6070955181022405365),
1359 KQU(14482950231361409799), KQU(12585159371331138077),
1360 KQU( 3511537678933588148), KQU( 2041849474531116417),
1361 KQU(10944936685095345792), KQU(18303116923079107729),
1362 KQU( 2720566371239725320), KQU( 4958672473562397622),
1363 KQU( 3032326668253243412), KQU(13689418691726908338),
1364 KQU( 1895205511728843996), KQU( 8146303515271990527),
1365 KQU(16507343500056113480), KQU( 473996939105902919),
1366 KQU( 9897686885246881481), KQU(14606433762712790575),
1367 KQU( 6732796251605566368), KQU( 1399778120855368916),
1368 KQU( 935023885182833777), KQU(16066282816186753477),
1369 KQU( 7291270991820612055), KQU(17530230393129853844),
1370 KQU(10223493623477451366), KQU(15841725630495676683),
1371 KQU(17379567246435515824), KQU( 8588251429375561971),
1372 KQU(18339511210887206423), KQU(17349587430725976100),
1373 KQU(12244876521394838088), KQU( 6382187714147161259),
1374 KQU(12335807181848950831), KQU(16948885622305460665),
1375 KQU(13755097796371520506), KQU(14806740373324947801),
1376 KQU( 4828699633859287703), KQU( 8209879281452301604),
1377 KQU(12435716669553736437), KQU(13970976859588452131),
1378 KQU( 6233960842566773148), KQU(12507096267900505759),
1379 KQU( 1198713114381279421), KQU(14989862731124149015),
1380 KQU(15932189508707978949), KQU( 2526406641432708722),
1381 KQU( 29187427817271982), KQU( 1499802773054556353),
1382 KQU(10816638187021897173), KQU( 5436139270839738132),
1383 KQU( 6659882287036010082), KQU( 2154048955317173697),
1384 KQU(10887317019333757642), KQU(16281091802634424955),
1385 KQU(10754549879915384901), KQU(10760611745769249815),
1386 KQU( 2161505946972504002), KQU( 5243132808986265107),
1387 KQU(10129852179873415416), KQU( 710339480008649081),
1388 KQU( 7802129453068808528), KQU(17967213567178907213),
1389 KQU(15730859124668605599), KQU(13058356168962376502),
1390 KQU( 3701224985413645909), KQU(14464065869149109264),
1391 KQU( 9959272418844311646), KQU(10157426099515958752),
1392 KQU(14013736814538268528), KQU(17797456992065653951),
1393 KQU(17418878140257344806), KQU(15457429073540561521),
1394 KQU( 2184426881360949378), KQU( 2062193041154712416),
1395 KQU( 8553463347406931661), KQU( 4913057625202871854),
1396 KQU( 2668943682126618425), KQU(17064444737891172288),
1397 KQU( 4997115903913298637), KQU(12019402608892327416),
1398 KQU(17603584559765897352), KQU(11367529582073647975),
1399 KQU( 8211476043518436050), KQU( 8676849804070323674),
1400 KQU(18431829230394475730), KQU(10490177861361247904),
1401 KQU( 9508720602025651349), KQU( 7409627448555722700),
1402 KQU( 5804047018862729008), KQU(11943858176893142594),
1403 KQU(11908095418933847092), KQU( 5415449345715887652),
1404 KQU( 1554022699166156407), KQU( 9073322106406017161),
1405 KQU( 7080630967969047082), KQU(18049736940860732943),
1406 KQU(12748714242594196794), KQU( 1226992415735156741),
1407 KQU(17900981019609531193), KQU(11720739744008710999),
1408 KQU( 3006400683394775434), KQU(11347974011751996028),
1409 KQU( 3316999628257954608), KQU( 8384484563557639101),
1410 KQU(18117794685961729767), KQU( 1900145025596618194),
1411 KQU(17459527840632892676), KQU( 5634784101865710994),
1412 KQU( 7918619300292897158), KQU( 3146577625026301350),
1413 KQU( 9955212856499068767), KQU( 1873995843681746975),
1414 KQU( 1561487759967972194), KQU( 8322718804375878474),
1415 KQU(11300284215327028366), KQU( 4667391032508998982),
1416 KQU( 9820104494306625580), KQU(17922397968599970610),
1417 KQU( 1784690461886786712), KQU(14940365084341346821),
1418 KQU( 5348719575594186181), KQU(10720419084507855261),
1419 KQU(14210394354145143274), KQU( 2426468692164000131),
1420 KQU(16271062114607059202), KQU(14851904092357070247),
1421 KQU( 6524493015693121897), KQU( 9825473835127138531),
1422 KQU(14222500616268569578), KQU(15521484052007487468),
1423 KQU(14462579404124614699), KQU(11012375590820665520),
1424 KQU(11625327350536084927), KQU(14452017765243785417),
1425 KQU( 9989342263518766305), KQU( 3640105471101803790),
1426 KQU( 4749866455897513242), KQU(13963064946736312044),
1427 KQU(10007416591973223791), KQU(18314132234717431115),
1428 KQU( 3286596588617483450), KQU( 7726163455370818765),
1429 KQU( 7575454721115379328), KQU( 5308331576437663422),
1430 KQU(18288821894903530934), KQU( 8028405805410554106),
1431 KQU(15744019832103296628), KQU( 149765559630932100),
1432 KQU( 6137705557200071977), KQU(14513416315434803615),
1433 KQU(11665702820128984473), KQU( 218926670505601386),
1434 KQU( 6868675028717769519), KQU(15282016569441512302),
1435 KQU( 5707000497782960236), KQU( 6671120586555079567),
1436 KQU( 2194098052618985448), KQU(16849577895477330978),
1437 KQU(12957148471017466283), KQU( 1997805535404859393),
1438 KQU( 1180721060263860490), KQU(13206391310193756958),
1439 KQU(12980208674461861797), KQU( 3825967775058875366),
1440 KQU(17543433670782042631), KQU( 1518339070120322730),
1441 KQU(16344584340890991669), KQU( 2611327165318529819),
1442 KQU(11265022723283422529), KQU( 4001552800373196817),
1443 KQU(14509595890079346161), KQU( 3528717165416234562),
1444 KQU(18153222571501914072), KQU( 9387182977209744425),
1445 KQU(10064342315985580021), KQU(11373678413215253977),
1446 KQU( 2308457853228798099), KQU( 9729042942839545302),
1447 KQU( 7833785471140127746), KQU( 6351049900319844436),
1448 KQU(14454610627133496067), KQU(12533175683634819111),
1449 KQU(15570163926716513029), KQU(13356980519185762498)
1450};
1451
1452TEST_BEGIN(test_gen_rand_32) {
1453 uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
1454 uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
1455 int i;
1456 uint32_t r32;
1457 sfmt_t *ctx;
1458
1459 expect_d_le(get_min_array_size32(), BLOCK_SIZE,
1460 "Array size too small");
1461 ctx = init_gen_rand(1234);
1462 fill_array32(ctx, array32, BLOCK_SIZE);
1463 fill_array32(ctx, array32_2, BLOCK_SIZE);
1464 fini_gen_rand(ctx);
1465
1466 ctx = init_gen_rand(1234);
1467 for (i = 0; i < BLOCK_SIZE; i++) {
1468 if (i < COUNT_1) {
1469 expect_u32_eq(array32[i], init_gen_rand_32_expected[i],
1470 "Output mismatch for i=%d", i);
1471 }
1472 r32 = gen_rand32(ctx);
1473 expect_u32_eq(r32, array32[i],
1474 "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
1475 }
1476 for (i = 0; i < COUNT_2; i++) {
1477 r32 = gen_rand32(ctx);
1478 expect_u32_eq(r32, array32_2[i],
1479 "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
1480 r32);
1481 }
1482 fini_gen_rand(ctx);
1483}
1484TEST_END
1485
1486TEST_BEGIN(test_by_array_32) {
1487 uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
1488 uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
1489 int i;
1490 uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0};
1491 uint32_t r32;
1492 sfmt_t *ctx;
1493
1494 expect_d_le(get_min_array_size32(), BLOCK_SIZE,
1495 "Array size too small");
1496 ctx = init_by_array(ini, 4);
1497 fill_array32(ctx, array32, BLOCK_SIZE);
1498 fill_array32(ctx, array32_2, BLOCK_SIZE);
1499 fini_gen_rand(ctx);
1500
1501 ctx = init_by_array(ini, 4);
1502 for (i = 0; i < BLOCK_SIZE; i++) {
1503 if (i < COUNT_1) {
1504 expect_u32_eq(array32[i], init_by_array_32_expected[i],
1505 "Output mismatch for i=%d", i);
1506 }
1507 r32 = gen_rand32(ctx);
1508 expect_u32_eq(r32, array32[i],
1509 "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
1510 }
1511 for (i = 0; i < COUNT_2; i++) {
1512 r32 = gen_rand32(ctx);
1513 expect_u32_eq(r32, array32_2[i],
1514 "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
1515 r32);
1516 }
1517 fini_gen_rand(ctx);
1518}
1519TEST_END
1520
1521TEST_BEGIN(test_gen_rand_64) {
1522 uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
1523 uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
1524 int i;
1525 uint64_t r;
1526 sfmt_t *ctx;
1527
1528 expect_d_le(get_min_array_size64(), BLOCK_SIZE64,
1529 "Array size too small");
1530 ctx = init_gen_rand(4321);
1531 fill_array64(ctx, array64, BLOCK_SIZE64);
1532 fill_array64(ctx, array64_2, BLOCK_SIZE64);
1533 fini_gen_rand(ctx);
1534
1535 ctx = init_gen_rand(4321);
1536 for (i = 0; i < BLOCK_SIZE64; i++) {
1537 if (i < COUNT_1) {
1538 expect_u64_eq(array64[i], init_gen_rand_64_expected[i],
1539 "Output mismatch for i=%d", i);
1540 }
1541 r = gen_rand64(ctx);
1542 expect_u64_eq(r, array64[i],
1543 "Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i,
1544 array64[i], r);
1545 }
1546 for (i = 0; i < COUNT_2; i++) {
1547 r = gen_rand64(ctx);
1548 expect_u64_eq(r, array64_2[i],
1549 "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i,
1550 array64_2[i], r);
1551 }
1552 fini_gen_rand(ctx);
1553}
1554TEST_END
1555
1556TEST_BEGIN(test_by_array_64) {
1557 uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
1558 uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
1559 int i;
1560 uint64_t r;
1561 uint32_t ini[] = {5, 4, 3, 2, 1};
1562 sfmt_t *ctx;
1563
1564 expect_d_le(get_min_array_size64(), BLOCK_SIZE64,
1565 "Array size too small");
1566 ctx = init_by_array(ini, 5);
1567 fill_array64(ctx, array64, BLOCK_SIZE64);
1568 fill_array64(ctx, array64_2, BLOCK_SIZE64);
1569 fini_gen_rand(ctx);
1570
1571 ctx = init_by_array(ini, 5);
1572 for (i = 0; i < BLOCK_SIZE64; i++) {
1573 if (i < COUNT_1) {
1574 expect_u64_eq(array64[i], init_by_array_64_expected[i],
1575 "Output mismatch for i=%d", i);
1576 }
1577 r = gen_rand64(ctx);
1578 expect_u64_eq(r, array64[i],
1579 "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i,
1580 array64[i], r);
1581 }
1582 for (i = 0; i < COUNT_2; i++) {
1583 r = gen_rand64(ctx);
1584 expect_u64_eq(r, array64_2[i],
1585 "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i,
1586 array64_2[i], r);
1587 }
1588 fini_gen_rand(ctx);
1589}
1590TEST_END
1591
1592int
1593main(void) {
1594 return test(
1595 test_gen_rand_32,
1596 test_by_array_32,
1597 test_gen_rand_64,
1598 test_by_array_64);
1599}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/a0.c b/examples/redis-unstable/deps/jemalloc/test/unit/a0.c
deleted file mode 100644
index c1be79a..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/a0.c
+++ /dev/null
@@ -1,16 +0,0 @@
1#include "test/jemalloc_test.h"
2
3TEST_BEGIN(test_a0) {
4 void *p;
5
6 p = a0malloc(1);
7 expect_ptr_not_null(p, "Unexpected a0malloc() error");
8 a0dalloc(p);
9}
10TEST_END
11
12int
13main(void) {
14 return test_no_malloc_init(
15 test_a0);
16}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/arena_decay.c b/examples/redis-unstable/deps/jemalloc/test/unit/arena_decay.c
deleted file mode 100644
index e991f4d..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/arena_decay.c
+++ /dev/null
@@ -1,436 +0,0 @@
1#include "test/jemalloc_test.h"
2#include "test/arena_util.h"
3
4#include "jemalloc/internal/ticker.h"
5
6static nstime_monotonic_t *nstime_monotonic_orig;
7static nstime_update_t *nstime_update_orig;
8
9static unsigned nupdates_mock;
10static nstime_t time_mock;
11static bool monotonic_mock;
12
13static bool
14nstime_monotonic_mock(void) {
15 return monotonic_mock;
16}
17
18static void
19nstime_update_mock(nstime_t *time) {
20 nupdates_mock++;
21 if (monotonic_mock) {
22 nstime_copy(time, &time_mock);
23 }
24}
25
26TEST_BEGIN(test_decay_ticks) {
27 test_skip_if(is_background_thread_enabled());
28 test_skip_if(opt_hpa);
29
30 ticker_geom_t *decay_ticker;
31 unsigned tick0, tick1, arena_ind;
32 size_t sz, large0;
33 void *p;
34
35 sz = sizeof(size_t);
36 expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
37 0), 0, "Unexpected mallctl failure");
38
39 /* Set up a manually managed arena for test. */
40 arena_ind = do_arena_create(0, 0);
41
42 /* Migrate to the new arena, and get the ticker. */
43 unsigned old_arena_ind;
44 size_t sz_arena_ind = sizeof(old_arena_ind);
45 expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
46 &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
47 "Unexpected mallctl() failure");
48 decay_ticker = tsd_arena_decay_tickerp_get(tsd_fetch());
49 expect_ptr_not_null(decay_ticker,
50 "Unexpected failure getting decay ticker");
51
52 /*
53 * Test the standard APIs using a large size class, since we can't
54 * control tcache interactions for small size classes (except by
55 * completely disabling tcache for the entire test program).
56 */
57
58 /* malloc(). */
59 tick0 = ticker_geom_read(decay_ticker);
60 p = malloc(large0);
61 expect_ptr_not_null(p, "Unexpected malloc() failure");
62 tick1 = ticker_geom_read(decay_ticker);
63 expect_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
64 /* free(). */
65 tick0 = ticker_geom_read(decay_ticker);
66 free(p);
67 tick1 = ticker_geom_read(decay_ticker);
68 expect_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
69
70 /* calloc(). */
71 tick0 = ticker_geom_read(decay_ticker);
72 p = calloc(1, large0);
73 expect_ptr_not_null(p, "Unexpected calloc() failure");
74 tick1 = ticker_geom_read(decay_ticker);
75 expect_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
76 free(p);
77
78 /* posix_memalign(). */
79 tick0 = ticker_geom_read(decay_ticker);
80 expect_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
81 "Unexpected posix_memalign() failure");
82 tick1 = ticker_geom_read(decay_ticker);
83 expect_u32_ne(tick1, tick0,
84 "Expected ticker to tick during posix_memalign()");
85 free(p);
86
87 /* aligned_alloc(). */
88 tick0 = ticker_geom_read(decay_ticker);
89 p = aligned_alloc(sizeof(size_t), large0);
90 expect_ptr_not_null(p, "Unexpected aligned_alloc() failure");
91 tick1 = ticker_geom_read(decay_ticker);
92 expect_u32_ne(tick1, tick0,
93 "Expected ticker to tick during aligned_alloc()");
94 free(p);
95
96 /* realloc(). */
97 /* Allocate. */
98 tick0 = ticker_geom_read(decay_ticker);
99 p = realloc(NULL, large0);
100 expect_ptr_not_null(p, "Unexpected realloc() failure");
101 tick1 = ticker_geom_read(decay_ticker);
102 expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
103 /* Reallocate. */
104 tick0 = ticker_geom_read(decay_ticker);
105 p = realloc(p, large0);
106 expect_ptr_not_null(p, "Unexpected realloc() failure");
107 tick1 = ticker_geom_read(decay_ticker);
108 expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
109 /* Deallocate. */
110 tick0 = ticker_geom_read(decay_ticker);
111 realloc(p, 0);
112 tick1 = ticker_geom_read(decay_ticker);
113 expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
114
115 /*
116 * Test the *allocx() APIs using large and small size classes, with
117 * tcache explicitly disabled.
118 */
119 {
120 unsigned i;
121 size_t allocx_sizes[2];
122 allocx_sizes[0] = large0;
123 allocx_sizes[1] = 1;
124
125 for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
126 sz = allocx_sizes[i];
127
128 /* mallocx(). */
129 tick0 = ticker_geom_read(decay_ticker);
130 p = mallocx(sz, MALLOCX_TCACHE_NONE);
131 expect_ptr_not_null(p, "Unexpected mallocx() failure");
132 tick1 = ticker_geom_read(decay_ticker);
133 expect_u32_ne(tick1, tick0,
134 "Expected ticker to tick during mallocx() (sz=%zu)",
135 sz);
136 /* rallocx(). */
137 tick0 = ticker_geom_read(decay_ticker);
138 p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
139 expect_ptr_not_null(p, "Unexpected rallocx() failure");
140 tick1 = ticker_geom_read(decay_ticker);
141 expect_u32_ne(tick1, tick0,
142 "Expected ticker to tick during rallocx() (sz=%zu)",
143 sz);
144 /* xallocx(). */
145 tick0 = ticker_geom_read(decay_ticker);
146 xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
147 tick1 = ticker_geom_read(decay_ticker);
148 expect_u32_ne(tick1, tick0,
149 "Expected ticker to tick during xallocx() (sz=%zu)",
150 sz);
151 /* dallocx(). */
152 tick0 = ticker_geom_read(decay_ticker);
153 dallocx(p, MALLOCX_TCACHE_NONE);
154 tick1 = ticker_geom_read(decay_ticker);
155 expect_u32_ne(tick1, tick0,
156 "Expected ticker to tick during dallocx() (sz=%zu)",
157 sz);
158 /* sdallocx(). */
159 p = mallocx(sz, MALLOCX_TCACHE_NONE);
160 expect_ptr_not_null(p, "Unexpected mallocx() failure");
161 tick0 = ticker_geom_read(decay_ticker);
162 sdallocx(p, sz, MALLOCX_TCACHE_NONE);
163 tick1 = ticker_geom_read(decay_ticker);
164 expect_u32_ne(tick1, tick0,
165 "Expected ticker to tick during sdallocx() "
166 "(sz=%zu)", sz);
167 }
168 }
169
170 /*
171 * Test tcache fill/flush interactions for large and small size classes,
172 * using an explicit tcache.
173 */
174 unsigned tcache_ind, i;
175 size_t tcache_sizes[2];
176 tcache_sizes[0] = large0;
177 tcache_sizes[1] = 1;
178
179 size_t tcache_max, sz_tcache_max;
180 sz_tcache_max = sizeof(tcache_max);
181 expect_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
182 &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure");
183
184 sz = sizeof(unsigned);
185 expect_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
186 NULL, 0), 0, "Unexpected mallctl failure");
187
188 for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
189 sz = tcache_sizes[i];
190
191 /* tcache fill. */
192 tick0 = ticker_geom_read(decay_ticker);
193 p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
194 expect_ptr_not_null(p, "Unexpected mallocx() failure");
195 tick1 = ticker_geom_read(decay_ticker);
196 expect_u32_ne(tick1, tick0,
197 "Expected ticker to tick during tcache fill "
198 "(sz=%zu)", sz);
199 /* tcache flush. */
200 dallocx(p, MALLOCX_TCACHE(tcache_ind));
201 tick0 = ticker_geom_read(decay_ticker);
202 expect_d_eq(mallctl("tcache.flush", NULL, NULL,
203 (void *)&tcache_ind, sizeof(unsigned)), 0,
204 "Unexpected mallctl failure");
205 tick1 = ticker_geom_read(decay_ticker);
206
207 /* Will only tick if it's in tcache. */
208 expect_u32_ne(tick1, tick0,
209 "Expected ticker to tick during tcache flush (sz=%zu)", sz);
210 }
211}
212TEST_END
213
214static void
215decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
216 uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) {
217#define NINTERVALS 101
218 nstime_t time, update_interval, decay_ms, deadline;
219
220 nstime_init_update(&time);
221
222 nstime_init2(&decay_ms, dt, 0);
223 nstime_copy(&deadline, &time);
224 nstime_add(&deadline, &decay_ms);
225
226 nstime_init2(&update_interval, dt, 0);
227 nstime_idivide(&update_interval, NINTERVALS);
228
229 /*
230 * Keep q's slab from being deallocated during the looping below. If a
231 * cached slab were to repeatedly come and go during looping, it could
232 * prevent the decay backlog ever becoming empty.
233 */
234 void *p = do_mallocx(1, flags);
235 uint64_t dirty_npurge1, muzzy_npurge1;
236 do {
237 for (unsigned i = 0; i < ARENA_DECAY_NTICKS_PER_UPDATE / 2;
238 i++) {
239 void *q = do_mallocx(1, flags);
240 dallocx(q, flags);
241 }
242 dirty_npurge1 = get_arena_dirty_npurge(arena_ind);
243 muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind);
244
245 nstime_add(&time_mock, &update_interval);
246 nstime_update(&time);
247 } while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 ==
248 dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) ||
249 !terminate_asap));
250 dallocx(p, flags);
251
252 if (config_stats) {
253 expect_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
254 muzzy_npurge0, "Expected purging to occur");
255 }
256#undef NINTERVALS
257}
258
259TEST_BEGIN(test_decay_ticker) {
260 test_skip_if(is_background_thread_enabled());
261 test_skip_if(opt_hpa);
262#define NPS 2048
263 ssize_t ddt = opt_dirty_decay_ms;
264 ssize_t mdt = opt_muzzy_decay_ms;
265 unsigned arena_ind = do_arena_create(ddt, mdt);
266 int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
267 void *ps[NPS];
268
269 /*
270 * Allocate a bunch of large objects, pause the clock, deallocate every
271 * other object (to fragment virtual memory), restore the clock, then
272 * [md]allocx() in a tight loop while advancing time rapidly to verify
273 * the ticker triggers purging.
274 */
275 size_t large;
276 size_t sz = sizeof(size_t);
277 expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL,
278 0), 0, "Unexpected mallctl failure");
279
280 do_purge(arena_ind);
281 uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind);
282 uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind);
283
284 for (unsigned i = 0; i < NPS; i++) {
285 ps[i] = do_mallocx(large, flags);
286 }
287
288 nupdates_mock = 0;
289 nstime_init_update(&time_mock);
290 monotonic_mock = true;
291
292 nstime_monotonic_orig = nstime_monotonic;
293 nstime_update_orig = nstime_update;
294 nstime_monotonic = nstime_monotonic_mock;
295 nstime_update = nstime_update_mock;
296
297 for (unsigned i = 0; i < NPS; i += 2) {
298 dallocx(ps[i], flags);
299 unsigned nupdates0 = nupdates_mock;
300 do_decay(arena_ind);
301 expect_u_gt(nupdates_mock, nupdates0,
302 "Expected nstime_update() to be called");
303 }
304
305 decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0,
306 muzzy_npurge0, true);
307 decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0,
308 muzzy_npurge0, false);
309
310 do_arena_destroy(arena_ind);
311
312 nstime_monotonic = nstime_monotonic_orig;
313 nstime_update = nstime_update_orig;
314#undef NPS
315}
316TEST_END
317
318TEST_BEGIN(test_decay_nonmonotonic) {
319 test_skip_if(is_background_thread_enabled());
320 test_skip_if(opt_hpa);
321#define NPS (SMOOTHSTEP_NSTEPS + 1)
322 int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
323 void *ps[NPS];
324 uint64_t npurge0 = 0;
325 uint64_t npurge1 = 0;
326 size_t sz, large0;
327 unsigned i, nupdates0;
328
329 sz = sizeof(size_t);
330 expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
331 0), 0, "Unexpected mallctl failure");
332
333 expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
334 "Unexpected mallctl failure");
335 do_epoch();
336 sz = sizeof(uint64_t);
337 npurge0 = get_arena_npurge(0);
338
339 nupdates_mock = 0;
340 nstime_init_update(&time_mock);
341 monotonic_mock = false;
342
343 nstime_monotonic_orig = nstime_monotonic;
344 nstime_update_orig = nstime_update;
345 nstime_monotonic = nstime_monotonic_mock;
346 nstime_update = nstime_update_mock;
347
348 for (i = 0; i < NPS; i++) {
349 ps[i] = mallocx(large0, flags);
350 expect_ptr_not_null(ps[i], "Unexpected mallocx() failure");
351 }
352
353 for (i = 0; i < NPS; i++) {
354 dallocx(ps[i], flags);
355 nupdates0 = nupdates_mock;
356 expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
357 "Unexpected arena.0.decay failure");
358 expect_u_gt(nupdates_mock, nupdates0,
359 "Expected nstime_update() to be called");
360 }
361
362 do_epoch();
363 sz = sizeof(uint64_t);
364 npurge1 = get_arena_npurge(0);
365
366 if (config_stats) {
367 expect_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
368 }
369
370 nstime_monotonic = nstime_monotonic_orig;
371 nstime_update = nstime_update_orig;
372#undef NPS
373}
374TEST_END
375
376TEST_BEGIN(test_decay_now) {
377 test_skip_if(is_background_thread_enabled());
378 test_skip_if(opt_hpa);
379
380 unsigned arena_ind = do_arena_create(0, 0);
381 expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
382 expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
383 size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
384 /* Verify that dirty/muzzy pages never linger after deallocation. */
385 for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
386 size_t size = sizes[i];
387 generate_dirty(arena_ind, size);
388 expect_zu_eq(get_arena_pdirty(arena_ind), 0,
389 "Unexpected dirty pages");
390 expect_zu_eq(get_arena_pmuzzy(arena_ind), 0,
391 "Unexpected muzzy pages");
392 }
393 do_arena_destroy(arena_ind);
394}
395TEST_END
396
397TEST_BEGIN(test_decay_never) {
398 test_skip_if(is_background_thread_enabled() || !config_stats);
399 test_skip_if(opt_hpa);
400
401 unsigned arena_ind = do_arena_create(-1, -1);
402 int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
403 expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
404 expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
405 size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
406 void *ptrs[sizeof(sizes)/sizeof(size_t)];
407 for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
408 ptrs[i] = do_mallocx(sizes[i], flags);
409 }
410 /* Verify that each deallocation generates additional dirty pages. */
411 size_t pdirty_prev = get_arena_pdirty(arena_ind);
412 size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
413 expect_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
414 expect_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
415 for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
416 dallocx(ptrs[i], flags);
417 size_t pdirty = get_arena_pdirty(arena_ind);
418 size_t pmuzzy = get_arena_pmuzzy(arena_ind);
419 expect_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind),
420 pdirty_prev, "Expected dirty pages to increase.");
421 expect_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
422 pdirty_prev = pdirty;
423 }
424 do_arena_destroy(arena_ind);
425}
426TEST_END
427
428int
429main(void) {
430 return test(
431 test_decay_ticks,
432 test_decay_ticker,
433 test_decay_nonmonotonic,
434 test_decay_now,
435 test_decay_never);
436}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/arena_decay.sh b/examples/redis-unstable/deps/jemalloc/test/unit/arena_decay.sh
deleted file mode 100644
index 52f1b20..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/arena_decay.sh
+++ /dev/null
@@ -1,3 +0,0 @@
1#!/bin/sh
2
3export MALLOC_CONF="dirty_decay_ms:1000,muzzy_decay_ms:1000,tcache_max:1024"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/arena_reset.c b/examples/redis-unstable/deps/jemalloc/test/unit/arena_reset.c
deleted file mode 100644
index 8ef0786..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/arena_reset.c
+++ /dev/null
@@ -1,361 +0,0 @@
1#ifndef ARENA_RESET_PROF_C_
2#include "test/jemalloc_test.h"
3#endif
4
5#include "jemalloc/internal/extent_mmap.h"
6#include "jemalloc/internal/rtree.h"
7
8#include "test/extent_hooks.h"
9
10static unsigned
11get_nsizes_impl(const char *cmd) {
12 unsigned ret;
13 size_t z;
14
15 z = sizeof(unsigned);
16 expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
17 "Unexpected mallctl(\"%s\", ...) failure", cmd);
18
19 return ret;
20}
21
22static unsigned
23get_nsmall(void) {
24 return get_nsizes_impl("arenas.nbins");
25}
26
27static unsigned
28get_nlarge(void) {
29 return get_nsizes_impl("arenas.nlextents");
30}
31
32static size_t
33get_size_impl(const char *cmd, size_t ind) {
34 size_t ret;
35 size_t z;
36 size_t mib[4];
37 size_t miblen = 4;
38
39 z = sizeof(size_t);
40 expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
41 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
42 mib[2] = ind;
43 z = sizeof(size_t);
44 expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
45 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
46
47 return ret;
48}
49
50static size_t
51get_small_size(size_t ind) {
52 return get_size_impl("arenas.bin.0.size", ind);
53}
54
55static size_t
56get_large_size(size_t ind) {
57 return get_size_impl("arenas.lextent.0.size", ind);
58}
59
60/* Like ivsalloc(), but safe to call on discarded allocations. */
61static size_t
62vsalloc(tsdn_t *tsdn, const void *ptr) {
63 emap_full_alloc_ctx_t full_alloc_ctx;
64 bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
65 ptr, &full_alloc_ctx);
66 if (missing) {
67 return 0;
68 }
69
70 if (full_alloc_ctx.edata == NULL) {
71 return 0;
72 }
73 if (edata_state_get(full_alloc_ctx.edata) != extent_state_active) {
74 return 0;
75 }
76
77 if (full_alloc_ctx.szind == SC_NSIZES) {
78 return 0;
79 }
80
81 return sz_index2size(full_alloc_ctx.szind);
82}
83
84static unsigned
85do_arena_create(extent_hooks_t *h) {
86 unsigned arena_ind;
87 size_t sz = sizeof(unsigned);
88 expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
89 (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
90 "Unexpected mallctl() failure");
91 return arena_ind;
92}
93
94static void
95do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) {
96#define NLARGE 32
97 unsigned nsmall, nlarge, i;
98 size_t sz;
99 int flags;
100 tsdn_t *tsdn;
101
102 flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
103
104 nsmall = get_nsmall();
105 nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge();
106 *nptrs = nsmall + nlarge;
107 *ptrs = (void **)malloc(*nptrs * sizeof(void *));
108 expect_ptr_not_null(*ptrs, "Unexpected malloc() failure");
109
110 /* Allocate objects with a wide range of sizes. */
111 for (i = 0; i < nsmall; i++) {
112 sz = get_small_size(i);
113 (*ptrs)[i] = mallocx(sz, flags);
114 expect_ptr_not_null((*ptrs)[i],
115 "Unexpected mallocx(%zu, %#x) failure", sz, flags);
116 }
117 for (i = 0; i < nlarge; i++) {
118 sz = get_large_size(i);
119 (*ptrs)[nsmall + i] = mallocx(sz, flags);
120 expect_ptr_not_null((*ptrs)[i],
121 "Unexpected mallocx(%zu, %#x) failure", sz, flags);
122 }
123
124 tsdn = tsdn_fetch();
125
126 /* Verify allocations. */
127 for (i = 0; i < *nptrs; i++) {
128 expect_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0,
129 "Allocation should have queryable size");
130 }
131}
132
133static void
134do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) {
135 tsdn_t *tsdn;
136 unsigned i;
137
138 tsdn = tsdn_fetch();
139
140 if (have_background_thread) {
141 malloc_mutex_lock(tsdn,
142 &background_thread_info_get(arena_ind)->mtx);
143 }
144 /* Verify allocations no longer exist. */
145 for (i = 0; i < nptrs; i++) {
146 expect_zu_eq(vsalloc(tsdn, ptrs[i]), 0,
147 "Allocation should no longer exist");
148 }
149 if (have_background_thread) {
150 malloc_mutex_unlock(tsdn,
151 &background_thread_info_get(arena_ind)->mtx);
152 }
153
154 free(ptrs);
155}
156
157static void
158do_arena_reset_destroy(const char *name, unsigned arena_ind) {
159 size_t mib[3];
160 size_t miblen;
161
162 miblen = sizeof(mib)/sizeof(size_t);
163 expect_d_eq(mallctlnametomib(name, mib, &miblen), 0,
164 "Unexpected mallctlnametomib() failure");
165 mib[1] = (size_t)arena_ind;
166 expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
167 "Unexpected mallctlbymib() failure");
168}
169
170static void
171do_arena_reset(unsigned arena_ind) {
172 do_arena_reset_destroy("arena.0.reset", arena_ind);
173}
174
175static void
176do_arena_destroy(unsigned arena_ind) {
177 do_arena_reset_destroy("arena.0.destroy", arena_ind);
178}
179
180TEST_BEGIN(test_arena_reset) {
181 unsigned arena_ind;
182 void **ptrs;
183 unsigned nptrs;
184
185 arena_ind = do_arena_create(NULL);
186 do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
187 do_arena_reset(arena_ind);
188 do_arena_reset_post(ptrs, nptrs, arena_ind);
189}
190TEST_END
191
192static bool
193arena_i_initialized(unsigned arena_ind, bool refresh) {
194 bool initialized;
195 size_t mib[3];
196 size_t miblen, sz;
197
198 if (refresh) {
199 uint64_t epoch = 1;
200 expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
201 sizeof(epoch)), 0, "Unexpected mallctl() failure");
202 }
203
204 miblen = sizeof(mib)/sizeof(size_t);
205 expect_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
206 "Unexpected mallctlnametomib() failure");
207 mib[1] = (size_t)arena_ind;
208 sz = sizeof(initialized);
209 expect_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL,
210 0), 0, "Unexpected mallctlbymib() failure");
211
212 return initialized;
213}
214
215TEST_BEGIN(test_arena_destroy_initial) {
216 expect_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
217 "Destroyed arena stats should not be initialized");
218}
219TEST_END
220
221TEST_BEGIN(test_arena_destroy_hooks_default) {
222 unsigned arena_ind, arena_ind_another, arena_ind_prev;
223 void **ptrs;
224 unsigned nptrs;
225
226 arena_ind = do_arena_create(NULL);
227 do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
228
229 expect_false(arena_i_initialized(arena_ind, false),
230 "Arena stats should not be initialized");
231 expect_true(arena_i_initialized(arena_ind, true),
232 "Arena stats should be initialized");
233
234 /*
235 * Create another arena before destroying one, to better verify arena
236 * index reuse.
237 */
238 arena_ind_another = do_arena_create(NULL);
239
240 do_arena_destroy(arena_ind);
241
242 expect_false(arena_i_initialized(arena_ind, true),
243 "Arena stats should not be initialized");
244 expect_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
245 "Destroyed arena stats should be initialized");
246
247 do_arena_reset_post(ptrs, nptrs, arena_ind);
248
249 arena_ind_prev = arena_ind;
250 arena_ind = do_arena_create(NULL);
251 do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
252 expect_u_eq(arena_ind, arena_ind_prev,
253 "Arena index should have been recycled");
254 do_arena_destroy(arena_ind);
255 do_arena_reset_post(ptrs, nptrs, arena_ind);
256
257 do_arena_destroy(arena_ind_another);
258
259 /* Try arena.create with custom hooks. */
260 size_t sz = sizeof(extent_hooks_t *);
261 extent_hooks_t *a0_default_hooks;
262 expect_d_eq(mallctl("arena.0.extent_hooks", (void *)&a0_default_hooks,
263 &sz, NULL, 0), 0, "Unexpected mallctlnametomib() failure");
264
265 /* Default impl; but wrapped as "customized". */
266 extent_hooks_t new_hooks = *a0_default_hooks;
267 extent_hooks_t *hook = &new_hooks;
268 sz = sizeof(unsigned);
269 expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
270 (void *)&hook, sizeof(void *)), 0,
271 "Unexpected mallctl() failure");
272 do_arena_destroy(arena_ind);
273}
274TEST_END
275
276/*
277 * Actually unmap extents, regardless of opt_retain, so that attempts to access
278 * a destroyed arena's memory will segfault.
279 */
280static bool
281extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
282 bool committed, unsigned arena_ind) {
283 TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
284 "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
285 "true" : "false", arena_ind);
286 expect_ptr_eq(extent_hooks, &hooks,
287 "extent_hooks should be same as pointer used to set hooks");
288 expect_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap,
289 "Wrong hook function");
290 called_dalloc = true;
291 if (!try_dalloc) {
292 return true;
293 }
294 did_dalloc = true;
295 if (!maps_coalesce && opt_retain) {
296 return true;
297 }
298 pages_unmap(addr, size);
299 return false;
300}
301
302static extent_hooks_t hooks_orig;
303
304static extent_hooks_t hooks_unmap = {
305 extent_alloc_hook,
306 extent_dalloc_unmap, /* dalloc */
307 extent_destroy_hook,
308 extent_commit_hook,
309 extent_decommit_hook,
310 extent_purge_lazy_hook,
311 extent_purge_forced_hook,
312 extent_split_hook,
313 extent_merge_hook
314};
315
316TEST_BEGIN(test_arena_destroy_hooks_unmap) {
317 unsigned arena_ind;
318 void **ptrs;
319 unsigned nptrs;
320
321 extent_hooks_prep();
322 if (maps_coalesce) {
323 try_decommit = false;
324 }
325 memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
326 memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t));
327
328 did_alloc = false;
329 arena_ind = do_arena_create(&hooks);
330 do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
331
332 expect_true(did_alloc, "Expected alloc");
333
334 expect_false(arena_i_initialized(arena_ind, false),
335 "Arena stats should not be initialized");
336 expect_true(arena_i_initialized(arena_ind, true),
337 "Arena stats should be initialized");
338
339 did_dalloc = false;
340 do_arena_destroy(arena_ind);
341 expect_true(did_dalloc, "Expected dalloc");
342
343 expect_false(arena_i_initialized(arena_ind, true),
344 "Arena stats should not be initialized");
345 expect_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
346 "Destroyed arena stats should be initialized");
347
348 do_arena_reset_post(ptrs, nptrs, arena_ind);
349
350 memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
351}
352TEST_END
353
354int
355main(void) {
356 return test(
357 test_arena_reset,
358 test_arena_destroy_initial,
359 test_arena_destroy_hooks_default,
360 test_arena_destroy_hooks_unmap);
361}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/arena_reset_prof.c b/examples/redis-unstable/deps/jemalloc/test/unit/arena_reset_prof.c
deleted file mode 100644
index 38d8012..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/arena_reset_prof.c
+++ /dev/null
@@ -1,4 +0,0 @@
1#include "test/jemalloc_test.h"
2#define ARENA_RESET_PROF_C_
3
4#include "arena_reset.c"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/arena_reset_prof.sh b/examples/redis-unstable/deps/jemalloc/test/unit/arena_reset_prof.sh
deleted file mode 100644
index 041dc1c..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/arena_reset_prof.sh
+++ /dev/null
@@ -1,3 +0,0 @@
1#!/bin/sh
2
3export MALLOC_CONF="prof:true,lg_prof_sample:0"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/atomic.c b/examples/redis-unstable/deps/jemalloc/test/unit/atomic.c
deleted file mode 100644
index c2ec8c7..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/atomic.c
+++ /dev/null
@@ -1,229 +0,0 @@
1#include "test/jemalloc_test.h"
2
3/*
4 * We *almost* have consistent short names (e.g. "u32" for uint32_t, "b" for
5 * bool, etc. The one exception is that the short name for void * is "p" in
6 * some places and "ptr" in others. In the long run it would be nice to unify
7 * these, but in the short run we'll use this shim.
8 */
9#define expect_p_eq expect_ptr_eq
10
11/*
12 * t: the non-atomic type, like "uint32_t".
13 * ta: the short name for the type, like "u32".
14 * val[1,2,3]: Values of the given type. The CAS tests use val2 for expected,
15 * and val3 for desired.
16 */
17
18#define DO_TESTS(t, ta, val1, val2, val3) do { \
19 t val; \
20 t expected; \
21 bool success; \
22 /* This (along with the load below) also tests ATOMIC_LOAD. */ \
23 atomic_##ta##_t atom = ATOMIC_INIT(val1); \
24 \
25 /* ATOMIC_INIT and load. */ \
26 val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
27 expect_##ta##_eq(val1, val, "Load or init failed"); \
28 \
29 /* Store. */ \
30 atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
31 atomic_store_##ta(&atom, val2, ATOMIC_RELAXED); \
32 val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
33 expect_##ta##_eq(val2, val, "Store failed"); \
34 \
35 /* Exchange. */ \
36 atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
37 val = atomic_exchange_##ta(&atom, val2, ATOMIC_RELAXED); \
38 expect_##ta##_eq(val1, val, "Exchange returned invalid value"); \
39 val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
40 expect_##ta##_eq(val2, val, "Exchange store invalid value"); \
41 \
42 /* \
43 * Weak CAS. Spurious failures are allowed, so we loop a few \
44 * times. \
45 */ \
46 atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
47 success = false; \
48 for (int retry = 0; retry < 10 && !success; retry++) { \
49 expected = val2; \
50 success = atomic_compare_exchange_weak_##ta(&atom, \
51 &expected, val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \
52 expect_##ta##_eq(val1, expected, \
53 "CAS should update expected"); \
54 } \
55 expect_b_eq(val1 == val2, success, \
56 "Weak CAS did the wrong state update"); \
57 val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
58 if (success) { \
59 expect_##ta##_eq(val3, val, \
60 "Successful CAS should update atomic"); \
61 } else { \
62 expect_##ta##_eq(val1, val, \
63 "Unsuccessful CAS should not update atomic"); \
64 } \
65 \
66 /* Strong CAS. */ \
67 atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
68 expected = val2; \
69 success = atomic_compare_exchange_strong_##ta(&atom, &expected, \
70 val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \
71 expect_b_eq(val1 == val2, success, \
72 "Strong CAS did the wrong state update"); \
73 val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
74 if (success) { \
75 expect_##ta##_eq(val3, val, \
76 "Successful CAS should update atomic"); \
77 } else { \
78 expect_##ta##_eq(val1, val, \
79 "Unsuccessful CAS should not update atomic"); \
80 } \
81 \
82 \
83} while (0)
84
85#define DO_INTEGER_TESTS(t, ta, val1, val2) do { \
86 atomic_##ta##_t atom; \
87 t val; \
88 \
89 /* Fetch-add. */ \
90 atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
91 val = atomic_fetch_add_##ta(&atom, val2, ATOMIC_RELAXED); \
92 expect_##ta##_eq(val1, val, \
93 "Fetch-add should return previous value"); \
94 val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
95 expect_##ta##_eq(val1 + val2, val, \
96 "Fetch-add should update atomic"); \
97 \
98 /* Fetch-sub. */ \
99 atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
100 val = atomic_fetch_sub_##ta(&atom, val2, ATOMIC_RELAXED); \
101 expect_##ta##_eq(val1, val, \
102 "Fetch-sub should return previous value"); \
103 val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
104 expect_##ta##_eq(val1 - val2, val, \
105 "Fetch-sub should update atomic"); \
106 \
107 /* Fetch-and. */ \
108 atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
109 val = atomic_fetch_and_##ta(&atom, val2, ATOMIC_RELAXED); \
110 expect_##ta##_eq(val1, val, \
111 "Fetch-and should return previous value"); \
112 val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
113 expect_##ta##_eq(val1 & val2, val, \
114 "Fetch-and should update atomic"); \
115 \
116 /* Fetch-or. */ \
117 atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
118 val = atomic_fetch_or_##ta(&atom, val2, ATOMIC_RELAXED); \
119 expect_##ta##_eq(val1, val, \
120 "Fetch-or should return previous value"); \
121 val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
122 expect_##ta##_eq(val1 | val2, val, \
123 "Fetch-or should update atomic"); \
124 \
125 /* Fetch-xor. */ \
126 atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
127 val = atomic_fetch_xor_##ta(&atom, val2, ATOMIC_RELAXED); \
128 expect_##ta##_eq(val1, val, \
129 "Fetch-xor should return previous value"); \
130 val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
131 expect_##ta##_eq(val1 ^ val2, val, \
132 "Fetch-xor should update atomic"); \
133} while (0)
134
135#define TEST_STRUCT(t, ta) \
136typedef struct { \
137 t val1; \
138 t val2; \
139 t val3; \
140} ta##_test_t;
141
142#define TEST_CASES(t) { \
143 {(t)-1, (t)-1, (t)-2}, \
144 {(t)-1, (t) 0, (t)-2}, \
145 {(t)-1, (t) 1, (t)-2}, \
146 \
147 {(t) 0, (t)-1, (t)-2}, \
148 {(t) 0, (t) 0, (t)-2}, \
149 {(t) 0, (t) 1, (t)-2}, \
150 \
151 {(t) 1, (t)-1, (t)-2}, \
152 {(t) 1, (t) 0, (t)-2}, \
153 {(t) 1, (t) 1, (t)-2}, \
154 \
155 {(t)0, (t)-(1 << 22), (t)-2}, \
156 {(t)0, (t)(1 << 22), (t)-2}, \
157 {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \
158 {(t)(1 << 22), (t)(1 << 22), (t)-2} \
159}
160
161#define TEST_BODY(t, ta) do { \
162 const ta##_test_t tests[] = TEST_CASES(t); \
163 for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \
164 ta##_test_t test = tests[i]; \
165 DO_TESTS(t, ta, test.val1, test.val2, test.val3); \
166 } \
167} while (0)
168
169#define INTEGER_TEST_BODY(t, ta) do { \
170 const ta##_test_t tests[] = TEST_CASES(t); \
171 for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \
172 ta##_test_t test = tests[i]; \
173 DO_TESTS(t, ta, test.val1, test.val2, test.val3); \
174 DO_INTEGER_TESTS(t, ta, test.val1, test.val2); \
175 } \
176} while (0)
177
178TEST_STRUCT(uint64_t, u64);
179TEST_BEGIN(test_atomic_u64) {
180#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
181 test_skip("64-bit atomic operations not supported");
182#else
183 INTEGER_TEST_BODY(uint64_t, u64);
184#endif
185}
186TEST_END
187
188
189TEST_STRUCT(uint32_t, u32);
190TEST_BEGIN(test_atomic_u32) {
191 INTEGER_TEST_BODY(uint32_t, u32);
192}
193TEST_END
194
195TEST_STRUCT(void *, p);
196TEST_BEGIN(test_atomic_p) {
197 TEST_BODY(void *, p);
198}
199TEST_END
200
201TEST_STRUCT(size_t, zu);
202TEST_BEGIN(test_atomic_zu) {
203 INTEGER_TEST_BODY(size_t, zu);
204}
205TEST_END
206
207TEST_STRUCT(ssize_t, zd);
208TEST_BEGIN(test_atomic_zd) {
209 INTEGER_TEST_BODY(ssize_t, zd);
210}
211TEST_END
212
213
214TEST_STRUCT(unsigned, u);
215TEST_BEGIN(test_atomic_u) {
216 INTEGER_TEST_BODY(unsigned, u);
217}
218TEST_END
219
220int
221main(void) {
222 return test(
223 test_atomic_u64,
224 test_atomic_u32,
225 test_atomic_p,
226 test_atomic_zu,
227 test_atomic_zd,
228 test_atomic_u);
229}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/background_thread.c b/examples/redis-unstable/deps/jemalloc/test/unit/background_thread.c
deleted file mode 100644
index c60010a..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/background_thread.c
+++ /dev/null
@@ -1,118 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/util.h"
4
5static void
6test_switch_background_thread_ctl(bool new_val) {
7 bool e0, e1;
8 size_t sz = sizeof(bool);
9
10 e1 = new_val;
11 expect_d_eq(mallctl("background_thread", (void *)&e0, &sz,
12 &e1, sz), 0, "Unexpected mallctl() failure");
13 expect_b_eq(e0, !e1,
14 "background_thread should be %d before.\n", !e1);
15 if (e1) {
16 expect_zu_gt(n_background_threads, 0,
17 "Number of background threads should be non zero.\n");
18 } else {
19 expect_zu_eq(n_background_threads, 0,
20 "Number of background threads should be zero.\n");
21 }
22}
23
24static void
25test_repeat_background_thread_ctl(bool before) {
26 bool e0, e1;
27 size_t sz = sizeof(bool);
28
29 e1 = before;
30 expect_d_eq(mallctl("background_thread", (void *)&e0, &sz,
31 &e1, sz), 0, "Unexpected mallctl() failure");
32 expect_b_eq(e0, before,
33 "background_thread should be %d.\n", before);
34 if (e1) {
35 expect_zu_gt(n_background_threads, 0,
36 "Number of background threads should be non zero.\n");
37 } else {
38 expect_zu_eq(n_background_threads, 0,
39 "Number of background threads should be zero.\n");
40 }
41}
42
43TEST_BEGIN(test_background_thread_ctl) {
44 test_skip_if(!have_background_thread);
45
46 bool e0, e1;
47 size_t sz = sizeof(bool);
48
49 expect_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz,
50 NULL, 0), 0, "Unexpected mallctl() failure");
51 expect_d_eq(mallctl("background_thread", (void *)&e1, &sz,
52 NULL, 0), 0, "Unexpected mallctl() failure");
53 expect_b_eq(e0, e1,
54 "Default and opt.background_thread does not match.\n");
55 if (e0) {
56 test_switch_background_thread_ctl(false);
57 }
58 expect_zu_eq(n_background_threads, 0,
59 "Number of background threads should be 0.\n");
60
61 for (unsigned i = 0; i < 4; i++) {
62 test_switch_background_thread_ctl(true);
63 test_repeat_background_thread_ctl(true);
64 test_repeat_background_thread_ctl(true);
65
66 test_switch_background_thread_ctl(false);
67 test_repeat_background_thread_ctl(false);
68 test_repeat_background_thread_ctl(false);
69 }
70}
71TEST_END
72
73TEST_BEGIN(test_background_thread_running) {
74 test_skip_if(!have_background_thread);
75 test_skip_if(!config_stats);
76
77#if defined(JEMALLOC_BACKGROUND_THREAD)
78 tsd_t *tsd = tsd_fetch();
79 background_thread_info_t *info = &background_thread_info[0];
80
81 test_repeat_background_thread_ctl(false);
82 test_switch_background_thread_ctl(true);
83 expect_b_eq(info->state, background_thread_started,
84 "Background_thread did not start.\n");
85
86 nstime_t start;
87 nstime_init_update(&start);
88
89 bool ran = false;
90 while (true) {
91 malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
92 if (info->tot_n_runs > 0) {
93 ran = true;
94 }
95 malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
96 if (ran) {
97 break;
98 }
99
100 nstime_t now;
101 nstime_init_update(&now);
102 nstime_subtract(&now, &start);
103 expect_u64_lt(nstime_sec(&now), 1000,
104 "Background threads did not run for 1000 seconds.");
105 sleep(1);
106 }
107 test_switch_background_thread_ctl(false);
108#endif
109}
110TEST_END
111
112int
113main(void) {
114 /* Background_thread creation tests reentrancy naturally. */
115 return test_no_reentrancy(
116 test_background_thread_ctl,
117 test_background_thread_running);
118}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/background_thread_enable.c b/examples/redis-unstable/deps/jemalloc/test/unit/background_thread_enable.c
deleted file mode 100644
index 44034ac..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/background_thread_enable.c
+++ /dev/null
@@ -1,96 +0,0 @@
1#include "test/jemalloc_test.h"
2
3const char *malloc_conf = "background_thread:false,narenas:1,max_background_threads:20";
4
5static unsigned
6max_test_narenas(void) {
7 /*
8 * 10 here is somewhat arbitrary, except insofar as we want to ensure
9 * that the number of background threads is smaller than the number of
10 * arenas. I'll ragequit long before we have to spin up 10 threads per
11 * cpu to handle background purging, so this is a conservative
12 * approximation.
13 */
14 unsigned ret = 10 * ncpus;
15 /* Limit the max to avoid VM exhaustion on 32-bit . */
16 if (ret > 512) {
17 ret = 512;
18 }
19
20 return ret;
21}
22
23TEST_BEGIN(test_deferred) {
24 test_skip_if(!have_background_thread);
25
26 unsigned id;
27 size_t sz_u = sizeof(unsigned);
28
29 for (unsigned i = 0; i < max_test_narenas(); i++) {
30 expect_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
31 "Failed to create arena");
32 }
33
34 bool enable = true;
35 size_t sz_b = sizeof(bool);
36 expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
37 "Failed to enable background threads");
38 enable = false;
39 expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
40 "Failed to disable background threads");
41}
42TEST_END
43
44TEST_BEGIN(test_max_background_threads) {
45 test_skip_if(!have_background_thread);
46
47 size_t max_n_thds;
48 size_t opt_max_n_thds;
49 size_t sz_m = sizeof(max_n_thds);
50 expect_d_eq(mallctl("opt.max_background_threads",
51 &opt_max_n_thds, &sz_m, NULL, 0), 0,
52 "Failed to get opt.max_background_threads");
53 expect_d_eq(mallctl("max_background_threads", &max_n_thds, &sz_m, NULL,
54 0), 0, "Failed to get max background threads");
55 expect_zu_eq(opt_max_n_thds, max_n_thds,
56 "max_background_threads and "
57 "opt.max_background_threads should match");
58 expect_d_eq(mallctl("max_background_threads", NULL, NULL, &max_n_thds,
59 sz_m), 0, "Failed to set max background threads");
60
61 unsigned id;
62 size_t sz_u = sizeof(unsigned);
63
64 for (unsigned i = 0; i < max_test_narenas(); i++) {
65 expect_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
66 "Failed to create arena");
67 }
68
69 bool enable = true;
70 size_t sz_b = sizeof(bool);
71 expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
72 "Failed to enable background threads");
73 expect_zu_eq(n_background_threads, max_n_thds,
74 "Number of background threads should not change.\n");
75 size_t new_max_thds = max_n_thds - 1;
76 if (new_max_thds > 0) {
77 expect_d_eq(mallctl("max_background_threads", NULL, NULL,
78 &new_max_thds, sz_m), 0,
79 "Failed to set max background threads");
80 expect_zu_eq(n_background_threads, new_max_thds,
81 "Number of background threads should decrease by 1.\n");
82 }
83 new_max_thds = 1;
84 expect_d_eq(mallctl("max_background_threads", NULL, NULL, &new_max_thds,
85 sz_m), 0, "Failed to set max background threads");
86 expect_zu_eq(n_background_threads, new_max_thds,
87 "Number of background threads should be 1.\n");
88}
89TEST_END
90
91int
92main(void) {
93 return test_no_reentrancy(
94 test_deferred,
95 test_max_background_threads);
96}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/base.c b/examples/redis-unstable/deps/jemalloc/test/unit/base.c
deleted file mode 100644
index 15e04a8..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/base.c
+++ /dev/null
@@ -1,265 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "test/extent_hooks.h"
4
5static extent_hooks_t hooks_null = {
6 extent_alloc_hook,
7 NULL, /* dalloc */
8 NULL, /* destroy */
9 NULL, /* commit */
10 NULL, /* decommit */
11 NULL, /* purge_lazy */
12 NULL, /* purge_forced */
13 NULL, /* split */
14 NULL /* merge */
15};
16
17static extent_hooks_t hooks_not_null = {
18 extent_alloc_hook,
19 extent_dalloc_hook,
20 extent_destroy_hook,
21 NULL, /* commit */
22 extent_decommit_hook,
23 extent_purge_lazy_hook,
24 extent_purge_forced_hook,
25 NULL, /* split */
26 NULL /* merge */
27};
28
29TEST_BEGIN(test_base_hooks_default) {
30 base_t *base;
31 size_t allocated0, allocated1, resident, mapped, n_thp;
32
33 tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
34 base = base_new(tsdn, 0,
35 (extent_hooks_t *)&ehooks_default_extent_hooks,
36 /* metadata_use_hooks */ true);
37
38 if (config_stats) {
39 base_stats_get(tsdn, base, &allocated0, &resident, &mapped,
40 &n_thp);
41 expect_zu_ge(allocated0, sizeof(base_t),
42 "Base header should count as allocated");
43 if (opt_metadata_thp == metadata_thp_always) {
44 expect_zu_gt(n_thp, 0,
45 "Base should have 1 THP at least.");
46 }
47 }
48
49 expect_ptr_not_null(base_alloc(tsdn, base, 42, 1),
50 "Unexpected base_alloc() failure");
51
52 if (config_stats) {
53 base_stats_get(tsdn, base, &allocated1, &resident, &mapped,
54 &n_thp);
55 expect_zu_ge(allocated1 - allocated0, 42,
56 "At least 42 bytes were allocated by base_alloc()");
57 }
58
59 base_delete(tsdn, base);
60}
61TEST_END
62
63TEST_BEGIN(test_base_hooks_null) {
64 extent_hooks_t hooks_orig;
65 base_t *base;
66 size_t allocated0, allocated1, resident, mapped, n_thp;
67
68 extent_hooks_prep();
69 try_dalloc = false;
70 try_destroy = true;
71 try_decommit = false;
72 try_purge_lazy = false;
73 try_purge_forced = false;
74 memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
75 memcpy(&hooks, &hooks_null, sizeof(extent_hooks_t));
76
77 tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
78 base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ true);
79 expect_ptr_not_null(base, "Unexpected base_new() failure");
80
81 if (config_stats) {
82 base_stats_get(tsdn, base, &allocated0, &resident, &mapped,
83 &n_thp);
84 expect_zu_ge(allocated0, sizeof(base_t),
85 "Base header should count as allocated");
86 if (opt_metadata_thp == metadata_thp_always) {
87 expect_zu_gt(n_thp, 0,
88 "Base should have 1 THP at least.");
89 }
90 }
91
92 expect_ptr_not_null(base_alloc(tsdn, base, 42, 1),
93 "Unexpected base_alloc() failure");
94
95 if (config_stats) {
96 base_stats_get(tsdn, base, &allocated1, &resident, &mapped,
97 &n_thp);
98 expect_zu_ge(allocated1 - allocated0, 42,
99 "At least 42 bytes were allocated by base_alloc()");
100 }
101
102 base_delete(tsdn, base);
103
104 memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
105}
106TEST_END
107
108TEST_BEGIN(test_base_hooks_not_null) {
109 extent_hooks_t hooks_orig;
110 base_t *base;
111 void *p, *q, *r, *r_exp;
112
113 extent_hooks_prep();
114 try_dalloc = false;
115 try_destroy = true;
116 try_decommit = false;
117 try_purge_lazy = false;
118 try_purge_forced = false;
119 memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
120 memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t));
121
122 tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
123 did_alloc = false;
124 base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ true);
125 expect_ptr_not_null(base, "Unexpected base_new() failure");
126 expect_true(did_alloc, "Expected alloc");
127
128 /*
129 * Check for tight packing at specified alignment under simple
130 * conditions.
131 */
132 {
133 const size_t alignments[] = {
134 1,
135 QUANTUM,
136 QUANTUM << 1,
137 CACHELINE,
138 CACHELINE << 1,
139 };
140 unsigned i;
141
142 for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) {
143 size_t alignment = alignments[i];
144 size_t align_ceil = ALIGNMENT_CEILING(alignment,
145 QUANTUM);
146 p = base_alloc(tsdn, base, 1, alignment);
147 expect_ptr_not_null(p,
148 "Unexpected base_alloc() failure");
149 expect_ptr_eq(p,
150 (void *)(ALIGNMENT_CEILING((uintptr_t)p,
151 alignment)), "Expected quantum alignment");
152 q = base_alloc(tsdn, base, alignment, alignment);
153 expect_ptr_not_null(q,
154 "Unexpected base_alloc() failure");
155 expect_ptr_eq((void *)((uintptr_t)p + align_ceil), q,
156 "Minimal allocation should take up %zu bytes",
157 align_ceil);
158 r = base_alloc(tsdn, base, 1, alignment);
159 expect_ptr_not_null(r,
160 "Unexpected base_alloc() failure");
161 expect_ptr_eq((void *)((uintptr_t)q + align_ceil), r,
162 "Minimal allocation should take up %zu bytes",
163 align_ceil);
164 }
165 }
166
167 /*
168 * Allocate an object that cannot fit in the first block, then verify
169 * that the first block's remaining space is considered for subsequent
170 * allocation.
171 */
172 expect_zu_ge(edata_bsize_get(&base->blocks->edata), QUANTUM,
173 "Remainder insufficient for test");
174 /* Use up all but one quantum of block. */
175 while (edata_bsize_get(&base->blocks->edata) > QUANTUM) {
176 p = base_alloc(tsdn, base, QUANTUM, QUANTUM);
177 expect_ptr_not_null(p, "Unexpected base_alloc() failure");
178 }
179 r_exp = edata_addr_get(&base->blocks->edata);
180 expect_zu_eq(base->extent_sn_next, 1, "One extant block expected");
181 q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM);
182 expect_ptr_not_null(q, "Unexpected base_alloc() failure");
183 expect_ptr_ne(q, r_exp, "Expected allocation from new block");
184 expect_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
185 r = base_alloc(tsdn, base, QUANTUM, QUANTUM);
186 expect_ptr_not_null(r, "Unexpected base_alloc() failure");
187 expect_ptr_eq(r, r_exp, "Expected allocation from first block");
188 expect_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
189
190 /*
191 * Check for proper alignment support when normal blocks are too small.
192 */
193 {
194 const size_t alignments[] = {
195 HUGEPAGE,
196 HUGEPAGE << 1
197 };
198 unsigned i;
199
200 for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) {
201 size_t alignment = alignments[i];
202 p = base_alloc(tsdn, base, QUANTUM, alignment);
203 expect_ptr_not_null(p,
204 "Unexpected base_alloc() failure");
205 expect_ptr_eq(p,
206 (void *)(ALIGNMENT_CEILING((uintptr_t)p,
207 alignment)), "Expected %zu-byte alignment",
208 alignment);
209 }
210 }
211
212 called_dalloc = called_destroy = called_decommit = called_purge_lazy =
213 called_purge_forced = false;
214 base_delete(tsdn, base);
215 expect_true(called_dalloc, "Expected dalloc call");
216 expect_true(!called_destroy, "Unexpected destroy call");
217 expect_true(called_decommit, "Expected decommit call");
218 expect_true(called_purge_lazy, "Expected purge_lazy call");
219 expect_true(called_purge_forced, "Expected purge_forced call");
220
221 try_dalloc = true;
222 try_destroy = true;
223 try_decommit = true;
224 try_purge_lazy = true;
225 try_purge_forced = true;
226 memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
227}
228TEST_END
229
230TEST_BEGIN(test_base_ehooks_get_for_metadata_default_hook) {
231 extent_hooks_prep();
232 memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t));
233 base_t *base;
234 tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
235 base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ false);
236 ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
237 expect_true(ehooks_are_default(ehooks),
238 "Expected default extent hook functions pointer");
239 base_delete(tsdn, base);
240}
241TEST_END
242
243
244TEST_BEGIN(test_base_ehooks_get_for_metadata_custom_hook) {
245 extent_hooks_prep();
246 memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t));
247 base_t *base;
248 tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
249 base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ true);
250 ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
251 expect_ptr_eq(&hooks, ehooks_get_extent_hooks_ptr(ehooks),
252 "Expected user-specified extend hook functions pointer");
253 base_delete(tsdn, base);
254}
255TEST_END
256
257int
258main(void) {
259 return test(
260 test_base_hooks_default,
261 test_base_hooks_null,
262 test_base_hooks_not_null,
263 test_base_ehooks_get_for_metadata_default_hook,
264 test_base_ehooks_get_for_metadata_custom_hook);
265}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc.c b/examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc.c
deleted file mode 100644
index 901c52b..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc.c
+++ /dev/null
@@ -1,189 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#define BATCH_MAX ((1U << 16) + 1024)
4static void *global_ptrs[BATCH_MAX];
5
6#define PAGE_ALIGNED(ptr) (((uintptr_t)ptr & PAGE_MASK) == 0)
7
8static void
9verify_batch_basic(tsd_t *tsd, void **ptrs, size_t batch, size_t usize,
10 bool zero) {
11 for (size_t i = 0; i < batch; ++i) {
12 void *p = ptrs[i];
13 expect_zu_eq(isalloc(tsd_tsdn(tsd), p), usize, "");
14 if (zero) {
15 for (size_t k = 0; k < usize; ++k) {
16 expect_true(*((unsigned char *)p + k) == 0, "");
17 }
18 }
19 }
20}
21
22static void
23verify_batch_locality(tsd_t *tsd, void **ptrs, size_t batch, size_t usize,
24 arena_t *arena, unsigned nregs) {
25 if (config_prof && opt_prof) {
26 /*
27 * Checking batch locality when prof is on is feasible but
28 * complicated, while checking the non-prof case suffices for
29 * unit-test purpose.
30 */
31 return;
32 }
33 for (size_t i = 0, j = 0; i < batch; ++i, ++j) {
34 if (j == nregs) {
35 j = 0;
36 }
37 if (j == 0 && batch - i < nregs) {
38 break;
39 }
40 void *p = ptrs[i];
41 expect_ptr_eq(iaalloc(tsd_tsdn(tsd), p), arena, "");
42 if (j == 0) {
43 expect_true(PAGE_ALIGNED(p), "");
44 continue;
45 }
46 assert(i > 0);
47 void *q = ptrs[i - 1];
48 expect_true((uintptr_t)p > (uintptr_t)q
49 && (size_t)((uintptr_t)p - (uintptr_t)q) == usize, "");
50 }
51}
52
53static void
54release_batch(void **ptrs, size_t batch, size_t size) {
55 for (size_t i = 0; i < batch; ++i) {
56 sdallocx(ptrs[i], size, 0);
57 }
58}
59
60typedef struct batch_alloc_packet_s batch_alloc_packet_t;
61struct batch_alloc_packet_s {
62 void **ptrs;
63 size_t num;
64 size_t size;
65 int flags;
66};
67
68static size_t
69batch_alloc_wrapper(void **ptrs, size_t num, size_t size, int flags) {
70 batch_alloc_packet_t batch_alloc_packet = {ptrs, num, size, flags};
71 size_t filled;
72 size_t len = sizeof(size_t);
73 assert_d_eq(mallctl("experimental.batch_alloc", &filled, &len,
74 &batch_alloc_packet, sizeof(batch_alloc_packet)), 0, "");
75 return filled;
76}
77
78static void
79test_wrapper(size_t size, size_t alignment, bool zero, unsigned arena_flag) {
80 tsd_t *tsd = tsd_fetch();
81 assert(tsd != NULL);
82 const size_t usize =
83 (alignment != 0 ? sz_sa2u(size, alignment) : sz_s2u(size));
84 const szind_t ind = sz_size2index(usize);
85 const bin_info_t *bin_info = &bin_infos[ind];
86 const unsigned nregs = bin_info->nregs;
87 assert(nregs > 0);
88 arena_t *arena;
89 if (arena_flag != 0) {
90 arena = arena_get(tsd_tsdn(tsd), MALLOCX_ARENA_GET(arena_flag),
91 false);
92 } else {
93 arena = arena_choose(tsd, NULL);
94 }
95 assert(arena != NULL);
96 int flags = arena_flag;
97 if (alignment != 0) {
98 flags |= MALLOCX_ALIGN(alignment);
99 }
100 if (zero) {
101 flags |= MALLOCX_ZERO;
102 }
103
104 /*
105 * Allocate for the purpose of bootstrapping arena_tdata, so that the
106 * change in bin stats won't contaminate the stats to be verified below.
107 */
108 void *p = mallocx(size, flags | MALLOCX_TCACHE_NONE);
109
110 for (size_t i = 0; i < 4; ++i) {
111 size_t base = 0;
112 if (i == 1) {
113 base = nregs;
114 } else if (i == 2) {
115 base = nregs * 2;
116 } else if (i == 3) {
117 base = (1 << 16);
118 }
119 for (int j = -1; j <= 1; ++j) {
120 if (base == 0 && j == -1) {
121 continue;
122 }
123 size_t batch = base + (size_t)j;
124 assert(batch < BATCH_MAX);
125 size_t filled = batch_alloc_wrapper(global_ptrs, batch,
126 size, flags);
127 assert_zu_eq(filled, batch, "");
128 verify_batch_basic(tsd, global_ptrs, batch, usize,
129 zero);
130 verify_batch_locality(tsd, global_ptrs, batch, usize,
131 arena, nregs);
132 release_batch(global_ptrs, batch, usize);
133 }
134 }
135
136 free(p);
137}
138
139TEST_BEGIN(test_batch_alloc) {
140 test_wrapper(11, 0, false, 0);
141}
142TEST_END
143
144TEST_BEGIN(test_batch_alloc_zero) {
145 test_wrapper(11, 0, true, 0);
146}
147TEST_END
148
149TEST_BEGIN(test_batch_alloc_aligned) {
150 test_wrapper(7, 16, false, 0);
151}
152TEST_END
153
154TEST_BEGIN(test_batch_alloc_manual_arena) {
155 unsigned arena_ind;
156 size_t len_unsigned = sizeof(unsigned);
157 assert_d_eq(mallctl("arenas.create", &arena_ind, &len_unsigned, NULL,
158 0), 0, "");
159 test_wrapper(11, 0, false, MALLOCX_ARENA(arena_ind));
160}
161TEST_END
162
163TEST_BEGIN(test_batch_alloc_large) {
164 size_t size = SC_LARGE_MINCLASS;
165 for (size_t batch = 0; batch < 4; ++batch) {
166 assert(batch < BATCH_MAX);
167 size_t filled = batch_alloc(global_ptrs, batch, size, 0);
168 assert_zu_eq(filled, batch, "");
169 release_batch(global_ptrs, batch, size);
170 }
171 size = tcache_maxclass + 1;
172 for (size_t batch = 0; batch < 4; ++batch) {
173 assert(batch < BATCH_MAX);
174 size_t filled = batch_alloc(global_ptrs, batch, size, 0);
175 assert_zu_eq(filled, batch, "");
176 release_batch(global_ptrs, batch, size);
177 }
178}
179TEST_END
180
181int
182main(void) {
183 return test(
184 test_batch_alloc,
185 test_batch_alloc_zero,
186 test_batch_alloc_aligned,
187 test_batch_alloc_manual_arena,
188 test_batch_alloc_large);
189}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc.sh b/examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc.sh
deleted file mode 100644
index 9d81010..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc.sh
+++ /dev/null
@@ -1,3 +0,0 @@
1#!/bin/sh
2
3export MALLOC_CONF="tcache_gc_incr_bytes:2147483648"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc_prof.c b/examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc_prof.c
deleted file mode 100644
index ef64458..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc_prof.c
+++ /dev/null
@@ -1 +0,0 @@
1#include "batch_alloc.c"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc_prof.sh b/examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc_prof.sh
deleted file mode 100644
index a2697a6..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/batch_alloc_prof.sh
+++ /dev/null
@@ -1,3 +0,0 @@
1#!/bin/sh
2
3export MALLOC_CONF="prof:true,lg_prof_sample:14"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/binshard.c b/examples/redis-unstable/deps/jemalloc/test/unit/binshard.c
deleted file mode 100644
index 040ea54..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/binshard.c
+++ /dev/null
@@ -1,154 +0,0 @@
1#include "test/jemalloc_test.h"
2
3/* Config -- "narenas:1,bin_shards:1-160:16|129-512:4|256-256:8" */
4
5#define NTHREADS 16
6#define REMOTE_NALLOC 256
7
8static void *
9thd_producer(void *varg) {
10 void **mem = varg;
11 unsigned arena, i;
12 size_t sz;
13
14 sz = sizeof(arena);
15 /* Remote arena. */
16 expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
17 "Unexpected mallctl() failure");
18 for (i = 0; i < REMOTE_NALLOC / 2; i++) {
19 mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena));
20 }
21
22 /* Remote bin. */
23 for (; i < REMOTE_NALLOC; i++) {
24 mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(0));
25 }
26
27 return NULL;
28}
29
30TEST_BEGIN(test_producer_consumer) {
31 thd_t thds[NTHREADS];
32 void *mem[NTHREADS][REMOTE_NALLOC];
33 unsigned i;
34
35 /* Create producer threads to allocate. */
36 for (i = 0; i < NTHREADS; i++) {
37 thd_create(&thds[i], thd_producer, mem[i]);
38 }
39 for (i = 0; i < NTHREADS; i++) {
40 thd_join(thds[i], NULL);
41 }
42 /* Remote deallocation by the current thread. */
43 for (i = 0; i < NTHREADS; i++) {
44 for (unsigned j = 0; j < REMOTE_NALLOC; j++) {
45 expect_ptr_not_null(mem[i][j],
46 "Unexpected remote allocation failure");
47 dallocx(mem[i][j], 0);
48 }
49 }
50}
51TEST_END
52
53static void *
54thd_start(void *varg) {
55 void *ptr, *ptr2;
56 edata_t *edata;
57 unsigned shard1, shard2;
58
59 tsdn_t *tsdn = tsdn_fetch();
60 /* Try triggering allocations from sharded bins. */
61 for (unsigned i = 0; i < 1024; i++) {
62 ptr = mallocx(1, MALLOCX_TCACHE_NONE);
63 ptr2 = mallocx(129, MALLOCX_TCACHE_NONE);
64
65 edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
66 shard1 = edata_binshard_get(edata);
67 dallocx(ptr, 0);
68 expect_u_lt(shard1, 16, "Unexpected bin shard used");
69
70 edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr2);
71 shard2 = edata_binshard_get(edata);
72 dallocx(ptr2, 0);
73 expect_u_lt(shard2, 4, "Unexpected bin shard used");
74
75 if (shard1 > 0 || shard2 > 0) {
76 /* Triggered sharded bin usage. */
77 return (void *)(uintptr_t)shard1;
78 }
79 }
80
81 return NULL;
82}
83
84TEST_BEGIN(test_bin_shard_mt) {
85 test_skip_if(have_percpu_arena &&
86 PERCPU_ARENA_ENABLED(opt_percpu_arena));
87
88 thd_t thds[NTHREADS];
89 unsigned i;
90 for (i = 0; i < NTHREADS; i++) {
91 thd_create(&thds[i], thd_start, NULL);
92 }
93 bool sharded = false;
94 for (i = 0; i < NTHREADS; i++) {
95 void *ret;
96 thd_join(thds[i], &ret);
97 if (ret != NULL) {
98 sharded = true;
99 }
100 }
101 expect_b_eq(sharded, true, "Did not find sharded bins");
102}
103TEST_END
104
105TEST_BEGIN(test_bin_shard) {
106 unsigned nbins, i;
107 size_t mib[4], mib2[4];
108 size_t miblen, miblen2, len;
109
110 len = sizeof(nbins);
111 expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
112 "Unexpected mallctl() failure");
113
114 miblen = 4;
115 expect_d_eq(mallctlnametomib("arenas.bin.0.nshards", mib, &miblen), 0,
116 "Unexpected mallctlnametomib() failure");
117 miblen2 = 4;
118 expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib2, &miblen2), 0,
119 "Unexpected mallctlnametomib() failure");
120
121 for (i = 0; i < nbins; i++) {
122 uint32_t nshards;
123 size_t size, sz1, sz2;
124
125 mib[2] = i;
126 sz1 = sizeof(nshards);
127 expect_d_eq(mallctlbymib(mib, miblen, (void *)&nshards, &sz1,
128 NULL, 0), 0, "Unexpected mallctlbymib() failure");
129
130 mib2[2] = i;
131 sz2 = sizeof(size);
132 expect_d_eq(mallctlbymib(mib2, miblen2, (void *)&size, &sz2,
133 NULL, 0), 0, "Unexpected mallctlbymib() failure");
134
135 if (size >= 1 && size <= 128) {
136 expect_u_eq(nshards, 16, "Unexpected nshards");
137 } else if (size == 256) {
138 expect_u_eq(nshards, 8, "Unexpected nshards");
139 } else if (size > 128 && size <= 512) {
140 expect_u_eq(nshards, 4, "Unexpected nshards");
141 } else {
142 expect_u_eq(nshards, 1, "Unexpected nshards");
143 }
144 }
145}
146TEST_END
147
148int
149main(void) {
150 return test_no_reentrancy(
151 test_bin_shard,
152 test_bin_shard_mt,
153 test_producer_consumer);
154}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/binshard.sh b/examples/redis-unstable/deps/jemalloc/test/unit/binshard.sh
deleted file mode 100644
index c1d58c8..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/binshard.sh
+++ /dev/null
@@ -1,3 +0,0 @@
1#!/bin/sh
2
3export MALLOC_CONF="narenas:1,bin_shards:1-160:16|129-512:4|256-256:8"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/bit_util.c b/examples/redis-unstable/deps/jemalloc/test/unit/bit_util.c
deleted file mode 100644
index 7d31b21..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/bit_util.c
+++ /dev/null
@@ -1,307 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/bit_util.h"
4
5#define TEST_POW2_CEIL(t, suf, pri) do { \
6 unsigned i, pow2; \
7 t x; \
8 \
9 expect_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \
10 \
11 for (i = 0; i < sizeof(t) * 8; i++) { \
12 expect_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \
13 << i, "Unexpected result"); \
14 } \
15 \
16 for (i = 2; i < sizeof(t) * 8; i++) { \
17 expect_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \
18 ((t)1) << i, "Unexpected result"); \
19 } \
20 \
21 for (i = 0; i < sizeof(t) * 8 - 1; i++) { \
22 expect_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \
23 ((t)1) << (i+1), "Unexpected result"); \
24 } \
25 \
26 for (pow2 = 1; pow2 < 25; pow2++) { \
27 for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \
28 x++) { \
29 expect_##suf##_eq(pow2_ceil_##suf(x), \
30 ((t)1) << pow2, \
31 "Unexpected result, x=%"pri, x); \
32 } \
33 } \
34} while (0)
35
36TEST_BEGIN(test_pow2_ceil_u64) {
37 TEST_POW2_CEIL(uint64_t, u64, FMTu64);
38}
39TEST_END
40
41TEST_BEGIN(test_pow2_ceil_u32) {
42 TEST_POW2_CEIL(uint32_t, u32, FMTu32);
43}
44TEST_END
45
46TEST_BEGIN(test_pow2_ceil_zu) {
47 TEST_POW2_CEIL(size_t, zu, "zu");
48}
49TEST_END
50
51void
52expect_lg_ceil_range(size_t input, unsigned answer) {
53 if (input == 1) {
54 expect_u_eq(0, answer, "Got %u as lg_ceil of 1", answer);
55 return;
56 }
57 expect_zu_le(input, (ZU(1) << answer),
58 "Got %u as lg_ceil of %zu", answer, input);
59 expect_zu_gt(input, (ZU(1) << (answer - 1)),
60 "Got %u as lg_ceil of %zu", answer, input);
61}
62
63void
64expect_lg_floor_range(size_t input, unsigned answer) {
65 if (input == 1) {
66 expect_u_eq(0, answer, "Got %u as lg_floor of 1", answer);
67 return;
68 }
69 expect_zu_ge(input, (ZU(1) << answer),
70 "Got %u as lg_floor of %zu", answer, input);
71 expect_zu_lt(input, (ZU(1) << (answer + 1)),
72 "Got %u as lg_floor of %zu", answer, input);
73}
74
75TEST_BEGIN(test_lg_ceil_floor) {
76 for (size_t i = 1; i < 10 * 1000 * 1000; i++) {
77 expect_lg_ceil_range(i, lg_ceil(i));
78 expect_lg_ceil_range(i, LG_CEIL(i));
79 expect_lg_floor_range(i, lg_floor(i));
80 expect_lg_floor_range(i, LG_FLOOR(i));
81 }
82 for (int i = 10; i < 8 * (1 << LG_SIZEOF_PTR) - 5; i++) {
83 for (size_t j = 0; j < (1 << 4); j++) {
84 size_t num1 = ((size_t)1 << i)
85 - j * ((size_t)1 << (i - 4));
86 size_t num2 = ((size_t)1 << i)
87 + j * ((size_t)1 << (i - 4));
88 expect_zu_ne(num1, 0, "Invalid lg argument");
89 expect_zu_ne(num2, 0, "Invalid lg argument");
90 expect_lg_ceil_range(num1, lg_ceil(num1));
91 expect_lg_ceil_range(num1, LG_CEIL(num1));
92 expect_lg_ceil_range(num2, lg_ceil(num2));
93 expect_lg_ceil_range(num2, LG_CEIL(num2));
94
95 expect_lg_floor_range(num1, lg_floor(num1));
96 expect_lg_floor_range(num1, LG_FLOOR(num1));
97 expect_lg_floor_range(num2, lg_floor(num2));
98 expect_lg_floor_range(num2, LG_FLOOR(num2));
99 }
100 }
101}
102TEST_END
103
104#define TEST_FFS(t, suf, test_suf, pri) do { \
105 for (unsigned i = 0; i < sizeof(t) * 8; i++) { \
106 for (unsigned j = 0; j <= i; j++) { \
107 for (unsigned k = 0; k <= j; k++) { \
108 t x = (t)1 << i; \
109 x |= (t)1 << j; \
110 x |= (t)1 << k; \
111 expect_##test_suf##_eq(ffs_##suf(x), k, \
112 "Unexpected result, x=%"pri, x); \
113 } \
114 } \
115 } \
116} while(0)
117
118TEST_BEGIN(test_ffs_u) {
119 TEST_FFS(unsigned, u, u,"u");
120}
121TEST_END
122
123TEST_BEGIN(test_ffs_lu) {
124 TEST_FFS(unsigned long, lu, lu, "lu");
125}
126TEST_END
127
128TEST_BEGIN(test_ffs_llu) {
129 TEST_FFS(unsigned long long, llu, qd, "llu");
130}
131TEST_END
132
133TEST_BEGIN(test_ffs_u32) {
134 TEST_FFS(uint32_t, u32, u32, FMTu32);
135}
136TEST_END
137
138TEST_BEGIN(test_ffs_u64) {
139 TEST_FFS(uint64_t, u64, u64, FMTu64);
140}
141TEST_END
142
143TEST_BEGIN(test_ffs_zu) {
144 TEST_FFS(size_t, zu, zu, "zu");
145}
146TEST_END
147
148#define TEST_FLS(t, suf, test_suf, pri) do { \
149 for (unsigned i = 0; i < sizeof(t) * 8; i++) { \
150 for (unsigned j = 0; j <= i; j++) { \
151 for (unsigned k = 0; k <= j; k++) { \
152 t x = (t)1 << i; \
153 x |= (t)1 << j; \
154 x |= (t)1 << k; \
155 expect_##test_suf##_eq(fls_##suf(x), i, \
156 "Unexpected result, x=%"pri, x); \
157 } \
158 } \
159 } \
160} while(0)
161
162TEST_BEGIN(test_fls_u) {
163 TEST_FLS(unsigned, u, u,"u");
164}
165TEST_END
166
167TEST_BEGIN(test_fls_lu) {
168 TEST_FLS(unsigned long, lu, lu, "lu");
169}
170TEST_END
171
172TEST_BEGIN(test_fls_llu) {
173 TEST_FLS(unsigned long long, llu, qd, "llu");
174}
175TEST_END
176
177TEST_BEGIN(test_fls_u32) {
178 TEST_FLS(uint32_t, u32, u32, FMTu32);
179}
180TEST_END
181
182TEST_BEGIN(test_fls_u64) {
183 TEST_FLS(uint64_t, u64, u64, FMTu64);
184}
185TEST_END
186
187TEST_BEGIN(test_fls_zu) {
188 TEST_FLS(size_t, zu, zu, "zu");
189}
190TEST_END
191
192TEST_BEGIN(test_fls_u_slow) {
193 TEST_FLS(unsigned, u_slow, u,"u");
194}
195TEST_END
196
197TEST_BEGIN(test_fls_lu_slow) {
198 TEST_FLS(unsigned long, lu_slow, lu, "lu");
199}
200TEST_END
201
202TEST_BEGIN(test_fls_llu_slow) {
203 TEST_FLS(unsigned long long, llu_slow, qd, "llu");
204}
205TEST_END
206
207static unsigned
208popcount_byte(unsigned byte) {
209 int count = 0;
210 for (int i = 0; i < 8; i++) {
211 if ((byte & (1 << i)) != 0) {
212 count++;
213 }
214 }
215 return count;
216}
217
218static uint64_t
219expand_byte_to_mask(unsigned byte) {
220 uint64_t result = 0;
221 for (int i = 0; i < 8; i++) {
222 if ((byte & (1 << i)) != 0) {
223 result |= ((uint64_t)0xFF << (i * 8));
224 }
225 }
226 return result;
227}
228
229#define TEST_POPCOUNT(t, suf, pri_hex) do { \
230 t bmul = (t)0x0101010101010101ULL; \
231 for (unsigned i = 0; i < (1 << sizeof(t)); i++) { \
232 for (unsigned j = 0; j < 256; j++) { \
233 /* \
234 * Replicate the byte j into various \
235 * bytes of the integer (as indicated by the \
236 * mask in i), and ensure that the popcount of \
237 * the result is popcount(i) * popcount(j) \
238 */ \
239 t mask = (t)expand_byte_to_mask(i); \
240 t x = (bmul * j) & mask; \
241 expect_u_eq( \
242 popcount_byte(i) * popcount_byte(j), \
243 popcount_##suf(x), \
244 "Unexpected result, x=0x%"pri_hex, x); \
245 } \
246 } \
247} while (0)
248
249TEST_BEGIN(test_popcount_u) {
250 TEST_POPCOUNT(unsigned, u, "x");
251}
252TEST_END
253
254TEST_BEGIN(test_popcount_u_slow) {
255 TEST_POPCOUNT(unsigned, u_slow, "x");
256}
257TEST_END
258
259TEST_BEGIN(test_popcount_lu) {
260 TEST_POPCOUNT(unsigned long, lu, "lx");
261}
262TEST_END
263
264TEST_BEGIN(test_popcount_lu_slow) {
265 TEST_POPCOUNT(unsigned long, lu_slow, "lx");
266}
267TEST_END
268
269TEST_BEGIN(test_popcount_llu) {
270 TEST_POPCOUNT(unsigned long long, llu, "llx");
271}
272TEST_END
273
274TEST_BEGIN(test_popcount_llu_slow) {
275 TEST_POPCOUNT(unsigned long long, llu_slow, "llx");
276}
277TEST_END
278
279int
280main(void) {
281 return test_no_reentrancy(
282 test_pow2_ceil_u64,
283 test_pow2_ceil_u32,
284 test_pow2_ceil_zu,
285 test_lg_ceil_floor,
286 test_ffs_u,
287 test_ffs_lu,
288 test_ffs_llu,
289 test_ffs_u32,
290 test_ffs_u64,
291 test_ffs_zu,
292 test_fls_u,
293 test_fls_lu,
294 test_fls_llu,
295 test_fls_u32,
296 test_fls_u64,
297 test_fls_zu,
298 test_fls_u_slow,
299 test_fls_lu_slow,
300 test_fls_llu_slow,
301 test_popcount_u,
302 test_popcount_u_slow,
303 test_popcount_lu,
304 test_popcount_lu_slow,
305 test_popcount_llu,
306 test_popcount_llu_slow);
307}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/bitmap.c b/examples/redis-unstable/deps/jemalloc/test/unit/bitmap.c
deleted file mode 100644
index 78e542b..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/bitmap.c
+++ /dev/null
@@ -1,343 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "test/nbits.h"
4
5static void
6test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) {
7 bitmap_info_t binfo_dyn;
8 bitmap_info_init(&binfo_dyn, nbits);
9
10 expect_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn),
11 "Unexpected difference between static and dynamic initialization, "
12 "nbits=%zu", nbits);
13 expect_zu_eq(binfo->nbits, binfo_dyn.nbits,
14 "Unexpected difference between static and dynamic initialization, "
15 "nbits=%zu", nbits);
16#ifdef BITMAP_USE_TREE
17 expect_u_eq(binfo->nlevels, binfo_dyn.nlevels,
18 "Unexpected difference between static and dynamic initialization, "
19 "nbits=%zu", nbits);
20 {
21 unsigned i;
22
23 for (i = 0; i < binfo->nlevels; i++) {
24 expect_zu_eq(binfo->levels[i].group_offset,
25 binfo_dyn.levels[i].group_offset,
26 "Unexpected difference between static and dynamic "
27 "initialization, nbits=%zu, level=%u", nbits, i);
28 }
29 }
30#else
31 expect_zu_eq(binfo->ngroups, binfo_dyn.ngroups,
32 "Unexpected difference between static and dynamic initialization");
33#endif
34}
35
36TEST_BEGIN(test_bitmap_initializer) {
37#define NB(nbits) { \
38 if (nbits <= BITMAP_MAXBITS) { \
39 bitmap_info_t binfo = \
40 BITMAP_INFO_INITIALIZER(nbits); \
41 test_bitmap_initializer_body(&binfo, nbits); \
42 } \
43 }
44 NBITS_TAB
45#undef NB
46}
47TEST_END
48
49static size_t
50test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits,
51 size_t prev_size) {
52 size_t size = bitmap_size(binfo);
53 expect_zu_ge(size, (nbits >> 3),
54 "Bitmap size is smaller than expected");
55 expect_zu_ge(size, prev_size, "Bitmap size is smaller than expected");
56 return size;
57}
58
59TEST_BEGIN(test_bitmap_size) {
60 size_t nbits, prev_size;
61
62 prev_size = 0;
63 for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
64 bitmap_info_t binfo;
65 bitmap_info_init(&binfo, nbits);
66 prev_size = test_bitmap_size_body(&binfo, nbits, prev_size);
67 }
68#define NB(nbits) { \
69 bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
70 prev_size = test_bitmap_size_body(&binfo, nbits, \
71 prev_size); \
72 }
73 prev_size = 0;
74 NBITS_TAB
75#undef NB
76}
77TEST_END
78
79static void
80test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits) {
81 size_t i;
82 bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
83 expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
84
85 bitmap_init(bitmap, binfo, false);
86 for (i = 0; i < nbits; i++) {
87 expect_false(bitmap_get(bitmap, binfo, i),
88 "Bit should be unset");
89 }
90
91 bitmap_init(bitmap, binfo, true);
92 for (i = 0; i < nbits; i++) {
93 expect_true(bitmap_get(bitmap, binfo, i), "Bit should be set");
94 }
95
96 free(bitmap);
97}
98
99TEST_BEGIN(test_bitmap_init) {
100 size_t nbits;
101
102 for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
103 bitmap_info_t binfo;
104 bitmap_info_init(&binfo, nbits);
105 test_bitmap_init_body(&binfo, nbits);
106 }
107#define NB(nbits) { \
108 bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
109 test_bitmap_init_body(&binfo, nbits); \
110 }
111 NBITS_TAB
112#undef NB
113}
114TEST_END
115
116static void
117test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits) {
118 size_t i;
119 bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
120 expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
121 bitmap_init(bitmap, binfo, false);
122
123 for (i = 0; i < nbits; i++) {
124 bitmap_set(bitmap, binfo, i);
125 }
126 expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
127 free(bitmap);
128}
129
130TEST_BEGIN(test_bitmap_set) {
131 size_t nbits;
132
133 for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
134 bitmap_info_t binfo;
135 bitmap_info_init(&binfo, nbits);
136 test_bitmap_set_body(&binfo, nbits);
137 }
138#define NB(nbits) { \
139 bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
140 test_bitmap_set_body(&binfo, nbits); \
141 }
142 NBITS_TAB
143#undef NB
144}
145TEST_END
146
147static void
148test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits) {
149 size_t i;
150 bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
151 expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
152 bitmap_init(bitmap, binfo, false);
153
154 for (i = 0; i < nbits; i++) {
155 bitmap_set(bitmap, binfo, i);
156 }
157 expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
158 for (i = 0; i < nbits; i++) {
159 bitmap_unset(bitmap, binfo, i);
160 }
161 for (i = 0; i < nbits; i++) {
162 bitmap_set(bitmap, binfo, i);
163 }
164 expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
165 free(bitmap);
166}
167
168TEST_BEGIN(test_bitmap_unset) {
169 size_t nbits;
170
171 for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
172 bitmap_info_t binfo;
173 bitmap_info_init(&binfo, nbits);
174 test_bitmap_unset_body(&binfo, nbits);
175 }
176#define NB(nbits) { \
177 bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
178 test_bitmap_unset_body(&binfo, nbits); \
179 }
180 NBITS_TAB
181#undef NB
182}
183TEST_END
184
185static void
186test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
187 bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
188 expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
189 bitmap_init(bitmap, binfo, false);
190
191 /* Iteratively set bits starting at the beginning. */
192 for (size_t i = 0; i < nbits; i++) {
193 expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
194 "First unset bit should be just after previous first unset "
195 "bit");
196 expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
197 "First unset bit should be just after previous first unset "
198 "bit");
199 expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
200 "First unset bit should be just after previous first unset "
201 "bit");
202 expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
203 "First unset bit should be just after previous first unset "
204 "bit");
205 }
206 expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
207
208 /*
209 * Iteratively unset bits starting at the end, and verify that
210 * bitmap_sfu() reaches the unset bits.
211 */
212 for (size_t i = nbits - 1; i < nbits; i--) { /* (nbits..0] */
213 bitmap_unset(bitmap, binfo, i);
214 expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
215 "First unset bit should the bit previously unset");
216 expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
217 "First unset bit should the bit previously unset");
218 expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
219 "First unset bit should the bit previously unset");
220 expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
221 "First unset bit should the bit previously unset");
222 bitmap_unset(bitmap, binfo, i);
223 }
224 expect_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset");
225
226 /*
227 * Iteratively set bits starting at the beginning, and verify that
228 * bitmap_sfu() looks past them.
229 */
230 for (size_t i = 1; i < nbits; i++) {
231 bitmap_set(bitmap, binfo, i - 1);
232 expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
233 "First unset bit should be just after the bit previously "
234 "set");
235 expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
236 "First unset bit should be just after the bit previously "
237 "set");
238 expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
239 "First unset bit should be just after the bit previously "
240 "set");
241 expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
242 "First unset bit should be just after the bit previously "
243 "set");
244 bitmap_unset(bitmap, binfo, i);
245 }
246 expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), nbits - 1,
247 "First unset bit should be the last bit");
248 expect_zu_eq(bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits-2 : nbits-1),
249 nbits - 1, "First unset bit should be the last bit");
250 expect_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), nbits - 1,
251 "First unset bit should be the last bit");
252 expect_zu_eq(bitmap_sfu(bitmap, binfo), nbits - 1,
253 "First unset bit should be the last bit");
254 expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
255
256 /*
257 * Bubble a "usu" pattern through the bitmap and verify that
258 * bitmap_ffu() finds the correct bit for all five min_bit cases.
259 */
260 if (nbits >= 3) {
261 for (size_t i = 0; i < nbits-2; i++) {
262 bitmap_unset(bitmap, binfo, i);
263 bitmap_unset(bitmap, binfo, i+2);
264 if (i > 0) {
265 expect_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
266 "Unexpected first unset bit");
267 }
268 expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
269 "Unexpected first unset bit");
270 expect_zu_eq(bitmap_ffu(bitmap, binfo, i+1), i+2,
271 "Unexpected first unset bit");
272 expect_zu_eq(bitmap_ffu(bitmap, binfo, i+2), i+2,
273 "Unexpected first unset bit");
274 if (i + 3 < nbits) {
275 expect_zu_eq(bitmap_ffu(bitmap, binfo, i+3),
276 nbits, "Unexpected first unset bit");
277 }
278 expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
279 "Unexpected first unset bit");
280 expect_zu_eq(bitmap_sfu(bitmap, binfo), i+2,
281 "Unexpected first unset bit");
282 }
283 }
284
285 /*
286 * Unset the last bit, bubble another unset bit through the bitmap, and
287 * verify that bitmap_ffu() finds the correct bit for all four min_bit
288 * cases.
289 */
290 if (nbits >= 3) {
291 bitmap_unset(bitmap, binfo, nbits-1);
292 for (size_t i = 0; i < nbits-1; i++) {
293 bitmap_unset(bitmap, binfo, i);
294 if (i > 0) {
295 expect_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
296 "Unexpected first unset bit");
297 }
298 expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
299 "Unexpected first unset bit");
300 expect_zu_eq(bitmap_ffu(bitmap, binfo, i+1), nbits-1,
301 "Unexpected first unset bit");
302 expect_zu_eq(bitmap_ffu(bitmap, binfo, nbits-1),
303 nbits-1, "Unexpected first unset bit");
304
305 expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
306 "Unexpected first unset bit");
307 }
308 expect_zu_eq(bitmap_sfu(bitmap, binfo), nbits-1,
309 "Unexpected first unset bit");
310 }
311
312 free(bitmap);
313}
314
315TEST_BEGIN(test_bitmap_xfu) {
316 size_t nbits, nbits_max;
317
318 /* The test is O(n^2); large page sizes may slow down too much. */
319 nbits_max = BITMAP_MAXBITS > 512 ? 512 : BITMAP_MAXBITS;
320 for (nbits = 1; nbits <= nbits_max; nbits++) {
321 bitmap_info_t binfo;
322 bitmap_info_init(&binfo, nbits);
323 test_bitmap_xfu_body(&binfo, nbits);
324 }
325#define NB(nbits) { \
326 bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
327 test_bitmap_xfu_body(&binfo, nbits); \
328 }
329 NBITS_TAB
330#undef NB
331}
332TEST_END
333
334int
335main(void) {
336 return test(
337 test_bitmap_initializer,
338 test_bitmap_size,
339 test_bitmap_init,
340 test_bitmap_set,
341 test_bitmap_unset,
342 test_bitmap_xfu);
343}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/buf_writer.c b/examples/redis-unstable/deps/jemalloc/test/unit/buf_writer.c
deleted file mode 100644
index d5e63a0..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/buf_writer.c
+++ /dev/null
@@ -1,196 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/buf_writer.h"
4
5#define TEST_BUF_SIZE 16
6#define UNIT_MAX (TEST_BUF_SIZE * 3)
7
8static size_t test_write_len;
9static char test_buf[TEST_BUF_SIZE];
10static uint64_t arg;
11static uint64_t arg_store;
12
13static void
14test_write_cb(void *cbopaque, const char *s) {
15 size_t prev_test_write_len = test_write_len;
16 test_write_len += strlen(s); /* only increase the length */
17 arg_store = *(uint64_t *)cbopaque; /* only pass along the argument */
18 assert_zu_le(prev_test_write_len, test_write_len,
19 "Test write overflowed");
20}
21
22static void
23test_buf_writer_body(tsdn_t *tsdn, buf_writer_t *buf_writer) {
24 char s[UNIT_MAX + 1];
25 size_t n_unit, remain, i;
26 ssize_t unit;
27
28 assert(buf_writer->buf != NULL);
29 memset(s, 'a', UNIT_MAX);
30 arg = 4; /* Starting value of random argument. */
31 arg_store = arg;
32 for (unit = UNIT_MAX; unit >= 0; --unit) {
33 /* unit keeps decreasing, so strlen(s) is always unit. */
34 s[unit] = '\0';
35 for (n_unit = 1; n_unit <= 3; ++n_unit) {
36 test_write_len = 0;
37 remain = 0;
38 for (i = 1; i <= n_unit; ++i) {
39 arg = prng_lg_range_u64(&arg, 64);
40 buf_writer_cb(buf_writer, s);
41 remain += unit;
42 if (remain > buf_writer->buf_size) {
43 /* Flushes should have happened. */
44 assert_u64_eq(arg_store, arg, "Call "
45 "back argument didn't get through");
46 remain %= buf_writer->buf_size;
47 if (remain == 0) {
48 /* Last flush should be lazy. */
49 remain += buf_writer->buf_size;
50 }
51 }
52 assert_zu_eq(test_write_len + remain, i * unit,
53 "Incorrect length after writing %zu strings"
54 " of length %zu", i, unit);
55 }
56 buf_writer_flush(buf_writer);
57 expect_zu_eq(test_write_len, n_unit * unit,
58 "Incorrect length after flushing at the end of"
59 " writing %zu strings of length %zu", n_unit, unit);
60 }
61 }
62 buf_writer_terminate(tsdn, buf_writer);
63}
64
65TEST_BEGIN(test_buf_write_static) {
66 buf_writer_t buf_writer;
67 tsdn_t *tsdn = tsdn_fetch();
68 assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
69 test_buf, TEST_BUF_SIZE),
70 "buf_writer_init() should not encounter error on static buffer");
71 test_buf_writer_body(tsdn, &buf_writer);
72}
73TEST_END
74
75TEST_BEGIN(test_buf_write_dynamic) {
76 buf_writer_t buf_writer;
77 tsdn_t *tsdn = tsdn_fetch();
78 assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
79 NULL, TEST_BUF_SIZE), "buf_writer_init() should not OOM");
80 test_buf_writer_body(tsdn, &buf_writer);
81}
82TEST_END
83
84TEST_BEGIN(test_buf_write_oom) {
85 buf_writer_t buf_writer;
86 tsdn_t *tsdn = tsdn_fetch();
87 assert_true(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
88 NULL, SC_LARGE_MAXCLASS + 1), "buf_writer_init() should OOM");
89 assert(buf_writer.buf == NULL);
90
91 char s[UNIT_MAX + 1];
92 size_t n_unit, i;
93 ssize_t unit;
94
95 memset(s, 'a', UNIT_MAX);
96 arg = 4; /* Starting value of random argument. */
97 arg_store = arg;
98 for (unit = UNIT_MAX; unit >= 0; unit -= UNIT_MAX / 4) {
99 /* unit keeps decreasing, so strlen(s) is always unit. */
100 s[unit] = '\0';
101 for (n_unit = 1; n_unit <= 3; ++n_unit) {
102 test_write_len = 0;
103 for (i = 1; i <= n_unit; ++i) {
104 arg = prng_lg_range_u64(&arg, 64);
105 buf_writer_cb(&buf_writer, s);
106 assert_u64_eq(arg_store, arg,
107 "Call back argument didn't get through");
108 assert_zu_eq(test_write_len, i * unit,
109 "Incorrect length after writing %zu strings"
110 " of length %zu", i, unit);
111 }
112 buf_writer_flush(&buf_writer);
113 expect_zu_eq(test_write_len, n_unit * unit,
114 "Incorrect length after flushing at the end of"
115 " writing %zu strings of length %zu", n_unit, unit);
116 }
117 }
118 buf_writer_terminate(tsdn, &buf_writer);
119}
120TEST_END
121
122static int test_read_count;
123static size_t test_read_len;
124static uint64_t arg_sum;
125
126ssize_t
127test_read_cb(void *cbopaque, void *buf, size_t limit) {
128 static uint64_t rand = 4;
129
130 arg_sum += *(uint64_t *)cbopaque;
131 assert_zu_gt(limit, 0, "Limit for read_cb must be positive");
132 --test_read_count;
133 if (test_read_count == 0) {
134 return -1;
135 } else {
136 size_t read_len = limit;
137 if (limit > 1) {
138 rand = prng_range_u64(&rand, (uint64_t)limit);
139 read_len -= (size_t)rand;
140 }
141 assert(read_len > 0);
142 memset(buf, 'a', read_len);
143 size_t prev_test_read_len = test_read_len;
144 test_read_len += read_len;
145 assert_zu_le(prev_test_read_len, test_read_len,
146 "Test read overflowed");
147 return read_len;
148 }
149}
150
151static void
152test_buf_writer_pipe_body(tsdn_t *tsdn, buf_writer_t *buf_writer) {
153 arg = 4; /* Starting value of random argument. */
154 for (int count = 5; count > 0; --count) {
155 arg = prng_lg_range_u64(&arg, 64);
156 arg_sum = 0;
157 test_read_count = count;
158 test_read_len = 0;
159 test_write_len = 0;
160 buf_writer_pipe(buf_writer, test_read_cb, &arg);
161 assert(test_read_count == 0);
162 expect_u64_eq(arg_sum, arg * count, "");
163 expect_zu_eq(test_write_len, test_read_len,
164 "Write length should be equal to read length");
165 }
166 buf_writer_terminate(tsdn, buf_writer);
167}
168
169TEST_BEGIN(test_buf_write_pipe) {
170 buf_writer_t buf_writer;
171 tsdn_t *tsdn = tsdn_fetch();
172 assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
173 test_buf, TEST_BUF_SIZE),
174 "buf_writer_init() should not encounter error on static buffer");
175 test_buf_writer_pipe_body(tsdn, &buf_writer);
176}
177TEST_END
178
179TEST_BEGIN(test_buf_write_pipe_oom) {
180 buf_writer_t buf_writer;
181 tsdn_t *tsdn = tsdn_fetch();
182 assert_true(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
183 NULL, SC_LARGE_MAXCLASS + 1), "buf_writer_init() should OOM");
184 test_buf_writer_pipe_body(tsdn, &buf_writer);
185}
186TEST_END
187
188int
189main(void) {
190 return test(
191 test_buf_write_static,
192 test_buf_write_dynamic,
193 test_buf_write_oom,
194 test_buf_write_pipe,
195 test_buf_write_pipe_oom);
196}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/cache_bin.c b/examples/redis-unstable/deps/jemalloc/test/unit/cache_bin.c
deleted file mode 100644
index 3b6dbab..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/cache_bin.c
+++ /dev/null
@@ -1,384 +0,0 @@
1#include "test/jemalloc_test.h"
2
/*
 * Fill-protocol round trip: request nfill_attempt slots, report only
 * nfill_succeed of them as actually filled, then pop everything back out,
 * checking pop order and low-water accounting.  ncached_max is unused here;
 * presumably kept for symmetry with the other do_*_test helpers.
 */
static void
do_fill_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
    cache_bin_sz_t ncached_max, cache_bin_sz_t nfill_attempt,
    cache_bin_sz_t nfill_succeed) {
	bool success;
	void *ptr;
	assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
	CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill_attempt);
	cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill_attempt);
	/* Simulate a partially successful fill. */
	for (cache_bin_sz_t i = 0; i < nfill_succeed; i++) {
		arr.ptr[i] = &ptrs[i];
	}
	cache_bin_finish_fill(bin, info, &arr, nfill_succeed);
	expect_true(cache_bin_ncached_get_local(bin, info) == nfill_succeed,
	    "");
	cache_bin_low_water_set(bin);

	for (cache_bin_sz_t i = 0; i < nfill_succeed; i++) {
		ptr = cache_bin_alloc(bin, &success);
		expect_true(success, "");
		expect_ptr_eq(ptr, (void *)&ptrs[i],
		    "Should pop in order filled");
		/* Each alloc lowers the low-water mark by one. */
		expect_true(cache_bin_low_water_get(bin, info)
		    == nfill_succeed - i - 1, "");
	}
	expect_true(cache_bin_ncached_get_local(bin, info) == 0, "");
	expect_true(cache_bin_low_water_get(bin, info) == 0, "");
}
31
/*
 * Push nfill pointers into the bin, flush nflush of them, and verify the
 * flush array sees them in reverse (stack) order.  The bin is drained before
 * returning so subsequent tests start empty.
 */
static void
do_flush_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
    cache_bin_sz_t nfill, cache_bin_sz_t nflush) {
	bool success;
	assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");

	for (cache_bin_sz_t i = 0; i < nfill; i++) {
		success = cache_bin_dalloc_easy(bin, &ptrs[i]);
		expect_true(success, "");
	}

	CACHE_BIN_PTR_ARRAY_DECLARE(arr, nflush);
	cache_bin_init_ptr_array_for_flush(bin, info, &arr, nflush);
	/* Flush exposes the most recently pushed pointers first. */
	for (cache_bin_sz_t i = 0; i < nflush; i++) {
		expect_ptr_eq(arr.ptr[i], &ptrs[nflush - i - 1], "");
	}
	cache_bin_finish_flush(bin, info, &arr, nflush);

	expect_true(cache_bin_ncached_get_local(bin, info) == nfill - nflush,
	    "");
	/* Drain the remainder. */
	while (cache_bin_ncached_get_local(bin, info) > 0) {
		cache_bin_alloc(bin, &success);
	}
}
56
57static void
58do_batch_alloc_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
59 cache_bin_sz_t nfill, size_t batch) {
60 assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
61 CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill);
62 cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill);
63 for (cache_bin_sz_t i = 0; i < nfill; i++) {
64 arr.ptr[i] = &ptrs[i];
65 }
66 cache_bin_finish_fill(bin, info, &arr, nfill);
67 assert_true(cache_bin_ncached_get_local(bin, info) == nfill, "");
68 cache_bin_low_water_set(bin);
69
70 void **out = malloc((batch + 1) * sizeof(void *));
71 size_t n = cache_bin_alloc_batch(bin, batch, out);
72 assert_true(n == ((size_t)nfill < batch ? (size_t)nfill : batch), "");
73 for (cache_bin_sz_t i = 0; i < (cache_bin_sz_t)n; i++) {
74 expect_ptr_eq(out[i], &ptrs[i], "");
75 }
76 expect_true(cache_bin_low_water_get(bin, info) == nfill -
77 (cache_bin_sz_t)n, "");
78 while (cache_bin_ncached_get_local(bin, info) > 0) {
79 bool success;
80 cache_bin_alloc(bin, &success);
81 }
82 free(out);
83}
84
/*
 * Allocate correctly sized and aligned backing storage for one bin and run
 * the preincrement/init/postincrement protocol, verifying the computed size
 * is consumed exactly.  NOTE(review): the backing memory is never freed;
 * presumably acceptable since this is test-only setup.
 */
static void
test_bin_init(cache_bin_t *bin, cache_bin_info_t *info) {
	size_t size;
	size_t alignment;
	cache_bin_info_compute_alloc(info, 1, &size, &alignment);
	void *mem = mallocx(size, MALLOCX_ALIGN(alignment));
	assert_ptr_not_null(mem, "Unexpected mallocx failure");

	size_t cur_offset = 0;
	cache_bin_preincrement(info, 1, mem, &cur_offset);
	cache_bin_init(bin, info, mem, &cur_offset);
	cache_bin_postincrement(info, 1, mem, &cur_offset);
	assert_zu_eq(cur_offset, size, "Should use all requested memory");
}
99
/*
 * End-to-end exercise of a single cache bin: empty-bin alloc failure, fill
 * to capacity, stack-order pops with low-water tracking, the easy-alloc
 * low-water boundary, and the fill / flush / batch-alloc helpers across a
 * matrix of sizes.
 */
TEST_BEGIN(test_cache_bin) {
	const int ncached_max = 100;
	bool success;
	void *ptr;

	cache_bin_info_t info;
	cache_bin_info_init(&info, ncached_max);
	cache_bin_t bin;
	test_bin_init(&bin, &info);

	/* Initialize to empty; should then have 0 elements. */
	expect_d_eq(ncached_max, cache_bin_info_ncached_max(&info), "");
	expect_true(cache_bin_ncached_get_local(&bin, &info) == 0, "");
	expect_true(cache_bin_low_water_get(&bin, &info) == 0, "");

	ptr = cache_bin_alloc_easy(&bin, &success);
	expect_false(success, "Shouldn't successfully allocate when empty");
	expect_ptr_null(ptr, "Shouldn't get a non-null pointer on failure");

	ptr = cache_bin_alloc(&bin, &success);
	expect_false(success, "Shouldn't successfully allocate when empty");
	expect_ptr_null(ptr, "Shouldn't get a non-null pointer on failure");

	/*
	 * We allocate one more item than ncached_max, so we can test cache bin
	 * exhaustion.
	 */
	void **ptrs = mallocx(sizeof(void *) * (ncached_max + 1), 0);
	assert_ptr_not_null(ptrs, "Unexpected mallocx failure");
	for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
		expect_true(cache_bin_ncached_get_local(&bin, &info) == i, "");
		success = cache_bin_dalloc_easy(&bin, &ptrs[i]);
		expect_true(success,
		    "Should be able to dalloc into a non-full cache bin.");
		expect_true(cache_bin_low_water_get(&bin, &info) == 0,
		    "Pushes and pops shouldn't change low water of zero.");
	}
	expect_true(cache_bin_ncached_get_local(&bin, &info) == ncached_max,
	    "");
	success = cache_bin_dalloc_easy(&bin, &ptrs[ncached_max]);
	expect_false(success, "Shouldn't be able to dalloc into a full bin.");

	/* Pin low water at the current (full) level. */
	cache_bin_low_water_set(&bin);

	for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
		expect_true(cache_bin_low_water_get(&bin, &info)
		    == ncached_max - i, "");
		expect_true(cache_bin_ncached_get_local(&bin, &info)
		    == ncached_max - i, "");
		/*
		 * This should fail -- the easy variant can't change the low
		 * water mark.
		 */
		ptr = cache_bin_alloc_easy(&bin, &success);
		expect_ptr_null(ptr, "");
		expect_false(success, "");
		expect_true(cache_bin_low_water_get(&bin, &info)
		    == ncached_max - i, "");
		expect_true(cache_bin_ncached_get_local(&bin, &info)
		    == ncached_max - i, "");

		/* This should succeed, though. */
		ptr = cache_bin_alloc(&bin, &success);
		expect_true(success, "");
		expect_ptr_eq(ptr, &ptrs[ncached_max - i - 1],
		    "Alloc should pop in stack order");
		expect_true(cache_bin_low_water_get(&bin, &info)
		    == ncached_max - i - 1, "");
		expect_true(cache_bin_ncached_get_local(&bin, &info)
		    == ncached_max - i - 1, "");
	}
	/* Now we're empty -- all alloc attempts should fail. */
	expect_true(cache_bin_ncached_get_local(&bin, &info) == 0, "");
	ptr = cache_bin_alloc_easy(&bin, &success);
	expect_ptr_null(ptr, "");
	expect_false(success, "");
	ptr = cache_bin_alloc(&bin, &success);
	expect_ptr_null(ptr, "");
	expect_false(success, "");

	/* Refill halfway, set low water there, then fill the rest. */
	for (cache_bin_sz_t i = 0; i < ncached_max / 2; i++) {
		cache_bin_dalloc_easy(&bin, &ptrs[i]);
	}
	cache_bin_low_water_set(&bin);

	for (cache_bin_sz_t i = ncached_max / 2; i < ncached_max; i++) {
		cache_bin_dalloc_easy(&bin, &ptrs[i]);
	}
	expect_true(cache_bin_ncached_get_local(&bin, &info) == ncached_max,
	    "");
	for (cache_bin_sz_t i = ncached_max - 1; i >= ncached_max / 2; i--) {
		/*
		 * Size is bigger than low water -- the reduced version should
		 * succeed.
		 */
		ptr = cache_bin_alloc_easy(&bin, &success);
		expect_true(success, "");
		expect_ptr_eq(ptr, &ptrs[i], "");
	}
	/* But now, we've hit low-water. */
	ptr = cache_bin_alloc_easy(&bin, &success);
	expect_false(success, "");
	expect_ptr_null(ptr, "");

	/* We're going to test filling -- we must be empty to start. */
	while (cache_bin_ncached_get_local(&bin, &info)) {
		cache_bin_alloc(&bin, &success);
		expect_true(success, "");
	}

	/* Test fill. */
	/* Try to fill all, succeed fully. */
	do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max, ncached_max);
	/* Try to fill all, succeed partially. */
	do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max,
	    ncached_max / 2);
	/* Try to fill all, fail completely. */
	do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max, 0);

	/* Try to fill some, succeed fully. */
	do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max / 2,
	    ncached_max / 2);
	/* Try to fill some, succeed partially. */
	do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max / 2,
	    ncached_max / 4);
	/* Try to fill some, fail completely. */
	do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max / 2, 0);

	do_flush_test(&bin, &info, ptrs, ncached_max, ncached_max);
	do_flush_test(&bin, &info, ptrs, ncached_max, ncached_max / 2);
	do_flush_test(&bin, &info, ptrs, ncached_max, 0);
	do_flush_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 2);
	do_flush_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 4);
	do_flush_test(&bin, &info, ptrs, ncached_max / 2, 0);

	do_batch_alloc_test(&bin, &info, ptrs, ncached_max, ncached_max);
	do_batch_alloc_test(&bin, &info, ptrs, ncached_max, ncached_max * 2);
	do_batch_alloc_test(&bin, &info, ptrs, ncached_max, ncached_max / 2);
	do_batch_alloc_test(&bin, &info, ptrs, ncached_max, 2);
	do_batch_alloc_test(&bin, &info, ptrs, ncached_max, 1);
	do_batch_alloc_test(&bin, &info, ptrs, ncached_max, 0);
	do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2,
	    ncached_max / 2);
	do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, ncached_max);
	do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2,
	    ncached_max / 4);
	do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, 2);
	do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, 1);
	do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, 0);
	do_batch_alloc_test(&bin, &info, ptrs, 2, ncached_max);
	do_batch_alloc_test(&bin, &info, ptrs, 2, 2);
	do_batch_alloc_test(&bin, &info, ptrs, 2, 1);
	do_batch_alloc_test(&bin, &info, ptrs, 2, 0);
	do_batch_alloc_test(&bin, &info, ptrs, 1, 2);
	do_batch_alloc_test(&bin, &info, ptrs, 1, 1);
	do_batch_alloc_test(&bin, &info, ptrs, 1, 0);
	do_batch_alloc_test(&bin, &info, ptrs, 0, 2);
	do_batch_alloc_test(&bin, &info, ptrs, 0, 1);
	do_batch_alloc_test(&bin, &info, ptrs, 0, 0);

	free(ptrs);
}
TEST_END
263
/*
 * Fill the bin with nfill regular pointers plus nstash stashed pointers
 * (nfill + nstash <= capacity), then verify: stashed pointers are never
 * returned by alloc, a full bin rejects both dalloc and stash, and
 * cache_bin_finish_flush_stashed() empties the stash.
 */
static void
do_flush_stashed_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
    cache_bin_sz_t nfill, cache_bin_sz_t nstash) {
	expect_true(cache_bin_ncached_get_local(bin, info) == 0,
	    "Bin not empty");
	expect_true(cache_bin_nstashed_get_local(bin, info) == 0,
	    "Bin not empty");
	expect_true(nfill + nstash <= info->ncached_max, "Exceeded max");

	bool ret;
	/* Fill */
	for (cache_bin_sz_t i = 0; i < nfill; i++) {
		ret = cache_bin_dalloc_easy(bin, &ptrs[i]);
		expect_true(ret, "Unexpected fill failure");
	}
	expect_true(cache_bin_ncached_get_local(bin, info) == nfill,
	    "Wrong cached count");

	/* Stash */
	for (cache_bin_sz_t i = 0; i < nstash; i++) {
		ret = cache_bin_stash(bin, &ptrs[i + nfill]);
		expect_true(ret, "Unexpected stash failure");
	}
	expect_true(cache_bin_nstashed_get_local(bin, info) == nstash,
	    "Wrong stashed count");

	/* At exact capacity, both insertion paths must refuse. */
	if (nfill + nstash == info->ncached_max) {
		ret = cache_bin_dalloc_easy(bin, &ptrs[0]);
		expect_false(ret, "Should not dalloc into a full bin");
		ret = cache_bin_stash(bin, &ptrs[0]);
		expect_false(ret, "Should not stash into a full bin");
	}

	/* Alloc filled ones */
	for (cache_bin_sz_t i = 0; i < nfill; i++) {
		void *ptr = cache_bin_alloc(bin, &ret);
		expect_true(ret, "Unexpected alloc failure");
		/* Verify it's not from the stashed range. */
		expect_true((uintptr_t)ptr < (uintptr_t)&ptrs[nfill],
		    "Should not alloc stashed ptrs");
	}
	expect_true(cache_bin_ncached_get_local(bin, info) == 0,
	    "Wrong cached count");
	expect_true(cache_bin_nstashed_get_local(bin, info) == nstash,
	    "Wrong stashed count");

	cache_bin_alloc(bin, &ret);
	expect_false(ret, "Should not alloc stashed");

	/* Clear stashed ones */
	cache_bin_finish_flush_stashed(bin, info);
	expect_true(cache_bin_ncached_get_local(bin, info) == 0,
	    "Wrong cached count");
	expect_true(cache_bin_nstashed_get_local(bin, info) == 0,
	    "Wrong stashed count");

	cache_bin_alloc(bin, &ret);
	expect_false(ret, "Should not alloc from empty bin");
}
323
/*
 * Interleave regular dallocs (even indices) and stashes (odd indices) until
 * the bin is full, then verify only the dalloc'd half is allocatable and
 * that stashed pointers remain accounted for.  Finishes with a matrix of
 * fill/stash combinations via do_flush_stashed_test().
 */
TEST_BEGIN(test_cache_bin_stash) {
	const int ncached_max = 100;

	cache_bin_t bin;
	cache_bin_info_t info;
	cache_bin_info_init(&info, ncached_max);
	test_bin_init(&bin, &info);

	/*
	 * The content of this array is not accessed; instead the interior
	 * addresses are used to insert / stash into the bins as test pointers.
	 */
	void **ptrs = mallocx(sizeof(void *) * (ncached_max + 1), 0);
	assert_ptr_not_null(ptrs, "Unexpected mallocx failure");
	bool ret;
	for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
		/* Half the slots so far were dalloc'd, half stashed. */
		expect_true(cache_bin_ncached_get_local(&bin, &info) ==
		    (i / 2 + i % 2), "Wrong ncached value");
		expect_true(cache_bin_nstashed_get_local(&bin, &info) == i / 2,
		    "Wrong nstashed value");
		if (i % 2 == 0) {
			cache_bin_dalloc_easy(&bin, &ptrs[i]);
		} else {
			ret = cache_bin_stash(&bin, &ptrs[i]);
			expect_true(ret, "Should be able to stash into a "
			    "non-full cache bin");
		}
	}
	ret = cache_bin_dalloc_easy(&bin, &ptrs[0]);
	expect_false(ret, "Should not dalloc into a full cache bin");
	ret = cache_bin_stash(&bin, &ptrs[0]);
	expect_false(ret, "Should not stash into a full cache bin");
	for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
		void *ptr = cache_bin_alloc(&bin, &ret);
		if (i < ncached_max / 2) {
			expect_true(ret, "Should be able to alloc");
			/* Returned pointers must come from even slots. */
			uintptr_t diff = ((uintptr_t)ptr - (uintptr_t)&ptrs[0])
			    / sizeof(void *);
			expect_true(diff % 2 == 0, "Should be able to alloc");
		} else {
			expect_false(ret, "Should not alloc stashed");
			expect_true(cache_bin_nstashed_get_local(&bin, &info) ==
			    ncached_max / 2, "Wrong nstashed value");
		}
	}

	/* Re-init to a clean bin for the flush-stashed matrix. */
	test_bin_init(&bin, &info);
	do_flush_stashed_test(&bin, &info, ptrs, ncached_max, 0);
	do_flush_stashed_test(&bin, &info, ptrs, 0, ncached_max);
	do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 2);
	do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 4, ncached_max / 2);
	do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 4);
	do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 4, ncached_max / 4);
}
TEST_END
379
/* Register and run the cache_bin tests via the jemalloc test harness. */
int
main(void) {
	return test(test_cache_bin,
	    test_cache_bin_stash);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/ckh.c b/examples/redis-unstable/deps/jemalloc/test/unit/ckh.c
deleted file mode 100644
index 36142ac..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/ckh.c
+++ /dev/null
@@ -1,211 +0,0 @@
1#include "test/jemalloc_test.h"
2
/* Create and destroy cuckoo hash tables with string and pointer ops. */
TEST_BEGIN(test_new_delete) {
	tsd_t *tsd;
	ckh_t ckh;

	tsd = tsd_fetch();

	expect_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
	    ckh_string_keycomp), "Unexpected ckh_new() error");
	ckh_delete(tsd, &ckh);

	expect_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
	    ckh_pointer_keycomp), "Unexpected ckh_new() error");
	ckh_delete(tsd, &ckh);
}
TEST_END
18
19TEST_BEGIN(test_count_insert_search_remove) {
20 tsd_t *tsd;
21 ckh_t ckh;
22 const char *strs[] = {
23 "a string",
24 "A string",
25 "a string.",
26 "A string."
27 };
28 const char *missing = "A string not in the hash table.";
29 size_t i;
30
31 tsd = tsd_fetch();
32
33 expect_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
34 ckh_string_keycomp), "Unexpected ckh_new() error");
35 expect_zu_eq(ckh_count(&ckh), 0,
36 "ckh_count() should return %zu, but it returned %zu", ZU(0),
37 ckh_count(&ckh));
38
39 /* Insert. */
40 for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
41 ckh_insert(tsd, &ckh, strs[i], strs[i]);
42 expect_zu_eq(ckh_count(&ckh), i+1,
43 "ckh_count() should return %zu, but it returned %zu", i+1,
44 ckh_count(&ckh));
45 }
46
47 /* Search. */
48 for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
49 union {
50 void *p;
51 const char *s;
52 } k, v;
53 void **kp, **vp;
54 const char *ks, *vs;
55
56 kp = (i & 1) ? &k.p : NULL;
57 vp = (i & 2) ? &v.p : NULL;
58 k.p = NULL;
59 v.p = NULL;
60 expect_false(ckh_search(&ckh, strs[i], kp, vp),
61 "Unexpected ckh_search() error");
62
63 ks = (i & 1) ? strs[i] : (const char *)NULL;
64 vs = (i & 2) ? strs[i] : (const char *)NULL;
65 expect_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
66 i);
67 expect_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
68 i);
69 }
70 expect_true(ckh_search(&ckh, missing, NULL, NULL),
71 "Unexpected ckh_search() success");
72
73 /* Remove. */
74 for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
75 union {
76 void *p;
77 const char *s;
78 } k, v;
79 void **kp, **vp;
80 const char *ks, *vs;
81
82 kp = (i & 1) ? &k.p : NULL;
83 vp = (i & 2) ? &v.p : NULL;
84 k.p = NULL;
85 v.p = NULL;
86 expect_false(ckh_remove(tsd, &ckh, strs[i], kp, vp),
87 "Unexpected ckh_remove() error");
88
89 ks = (i & 1) ? strs[i] : (const char *)NULL;
90 vs = (i & 2) ? strs[i] : (const char *)NULL;
91 expect_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
92 i);
93 expect_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
94 i);
95 expect_zu_eq(ckh_count(&ckh),
96 sizeof(strs)/sizeof(const char *) - i - 1,
97 "ckh_count() should return %zu, but it returned %zu",
98 sizeof(strs)/sizeof(const char *) - i - 1,
99 ckh_count(&ckh));
100 }
101
102 ckh_delete(tsd, &ckh);
103}
104TEST_END
105
/*
 * Stress insert/iterate/remove with pointer keys: in each outer round i,
 * re-insert items i..NITEMS-1, remove items i+1..NITEMS-1 (verifying
 * double-remove fails), then iterate and confirm exactly items 0..i remain.
 */
TEST_BEGIN(test_insert_iter_remove) {
#define NITEMS ZU(1000)
	tsd_t *tsd;
	ckh_t ckh;
	void **p[NITEMS];
	void *q, *r;
	size_t i;

	tsd = tsd_fetch();

	expect_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash,
	    ckh_pointer_keycomp), "Unexpected ckh_new() error");

	/* Distinct heap pointers serve as both keys and values. */
	for (i = 0; i < NITEMS; i++) {
		p[i] = mallocx(i+1, 0);
		expect_ptr_not_null(p[i], "Unexpected mallocx() failure");
	}

	for (i = 0; i < NITEMS; i++) {
		size_t j;

		for (j = i; j < NITEMS; j++) {
			expect_false(ckh_insert(tsd, &ckh, p[j], p[j]),
			    "Unexpected ckh_insert() failure");
			expect_false(ckh_search(&ckh, p[j], &q, &r),
			    "Unexpected ckh_search() failure");
			expect_ptr_eq(p[j], q, "Key pointer mismatch");
			expect_ptr_eq(p[j], r, "Value pointer mismatch");
		}

		expect_zu_eq(ckh_count(&ckh), NITEMS,
		    "ckh_count() should return %zu, but it returned %zu",
		    NITEMS, ckh_count(&ckh));

		for (j = i + 1; j < NITEMS; j++) {
			expect_false(ckh_search(&ckh, p[j], NULL, NULL),
			    "Unexpected ckh_search() failure");
			expect_false(ckh_remove(tsd, &ckh, p[j], &q, &r),
			    "Unexpected ckh_remove() failure");
			expect_ptr_eq(p[j], q, "Key pointer mismatch");
			expect_ptr_eq(p[j], r, "Value pointer mismatch");
			/* Second search/remove of the same key must fail. */
			expect_true(ckh_search(&ckh, p[j], NULL, NULL),
			    "Unexpected ckh_search() success");
			expect_true(ckh_remove(tsd, &ckh, p[j], &q, &r),
			    "Unexpected ckh_remove() success");
		}

		{
			bool seen[NITEMS];
			size_t tabind;

			memset(seen, 0, sizeof(seen));

			/* Iterate, marking each item exactly once. */
			for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) {
				size_t k;

				expect_ptr_eq(q, r, "Key and val not equal");

				for (k = 0; k < NITEMS; k++) {
					if (p[k] == q) {
						expect_false(seen[k],
						    "Item %zu already seen", k);
						seen[k] = true;
						break;
					}
				}
			}

			for (j = 0; j < i + 1; j++) {
				expect_true(seen[j], "Item %zu not seen", j);
			}
			for (; j < NITEMS; j++) {
				expect_false(seen[j], "Item %zu seen", j);
			}
		}
	}

	/* Drain the table completely and free the backing allocations. */
	for (i = 0; i < NITEMS; i++) {
		expect_false(ckh_search(&ckh, p[i], NULL, NULL),
		    "Unexpected ckh_search() failure");
		expect_false(ckh_remove(tsd, &ckh, p[i], &q, &r),
		    "Unexpected ckh_remove() failure");
		expect_ptr_eq(p[i], q, "Key pointer mismatch");
		expect_ptr_eq(p[i], r, "Value pointer mismatch");
		expect_true(ckh_search(&ckh, p[i], NULL, NULL),
		    "Unexpected ckh_search() success");
		expect_true(ckh_remove(tsd, &ckh, p[i], &q, &r),
		    "Unexpected ckh_remove() success");
		dallocx(p[i], 0);
	}

	expect_zu_eq(ckh_count(&ckh), 0,
	    "ckh_count() should return %zu, but it returned %zu",
	    ZU(0), ckh_count(&ckh));
	ckh_delete(tsd, &ckh);
#undef NITEMS
}
TEST_END
204
/* Register and run the cuckoo-hash tests via the jemalloc test harness. */
int
main(void) {
	return test(
	    test_new_delete,
	    test_count_insert_search_remove,
	    test_insert_iter_remove);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/counter.c b/examples/redis-unstable/deps/jemalloc/test/unit/counter.c
deleted file mode 100644
index 277baac..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/counter.c
+++ /dev/null
@@ -1,80 +0,0 @@
1#include "test/jemalloc_test.h"
2
3static const uint64_t interval = 1 << 20;
4
5TEST_BEGIN(test_counter_accum) {
6 uint64_t increment = interval >> 4;
7 unsigned n = interval / increment;
8 uint64_t accum = 0;
9
10 counter_accum_t c;
11 counter_accum_init(&c, interval);
12
13 tsd_t *tsd = tsd_fetch();
14 bool trigger;
15 for (unsigned i = 0; i < n; i++) {
16 trigger = counter_accum(tsd_tsdn(tsd), &c, increment);
17 accum += increment;
18 if (accum < interval) {
19 expect_b_eq(trigger, false, "Should not trigger");
20 } else {
21 expect_b_eq(trigger, true, "Should have triggered");
22 }
23 }
24 expect_b_eq(trigger, true, "Should have triggered");
25}
26TEST_END
27
/*
 * Assert the counter's accumulated byte count equals v.  Uses the
 * unsynchronized read variant, so callers must ensure no concurrent
 * accumulation is in flight.
 */
void
expect_counter_value(counter_accum_t *c, uint64_t v) {
	uint64_t accum = locked_read_u64_unsynchronized(&c->accumbytes);
	expect_u64_eq(accum, v, "Counter value mismatch");
}
33
34#define N_THDS (16)
35#define N_ITER_THD (1 << 12)
36#define ITER_INCREMENT (interval >> 4)
37
/*
 * Worker: accumulate ITER_INCREMENT into the shared counter N_ITER_THD
 * times, returning how many times the interval trigger fired (smuggled back
 * through the void * return as a uintptr_t).
 */
static void *
thd_start(void *varg) {
	counter_accum_t *c = (counter_accum_t *)varg;

	tsd_t *tsd = tsd_fetch();
	bool trigger;
	uintptr_t n_triggered = 0;
	for (unsigned i = 0; i < N_ITER_THD; i++) {
		trigger = counter_accum(tsd_tsdn(tsd), c, ITER_INCREMENT);
		n_triggered += trigger ? 1 : 0;
	}

	return (void *)n_triggered;
}
52
53
/*
 * Multi-threaded accumulation: the total number of triggers across all
 * threads must equal total-bytes-added / interval, i.e. no trigger is lost
 * or double-counted under contention.
 */
TEST_BEGIN(test_counter_mt) {
	counter_accum_t shared_c;
	counter_accum_init(&shared_c, interval);

	thd_t thds[N_THDS];
	unsigned i;
	for (i = 0; i < N_THDS; i++) {
		thd_create(&thds[i], thd_start, (void *)&shared_c);
	}

	/* Sum the per-thread trigger counts returned via thread exit. */
	uint64_t sum = 0;
	for (i = 0; i < N_THDS; i++) {
		void *ret;
		thd_join(thds[i], &ret);
		sum += (uintptr_t)ret;
	}
	expect_u64_eq(sum, N_THDS * N_ITER_THD / (interval / ITER_INCREMENT),
	    "Incorrect number of triggers");
}
TEST_END
74
/* Register and run the counter tests via the jemalloc test harness. */
int
main(void) {
	return test(
	    test_counter_accum,
	    test_counter_mt);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/decay.c b/examples/redis-unstable/deps/jemalloc/test/unit/decay.c
deleted file mode 100644
index bdb6d0a..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/decay.c
+++ /dev/null
@@ -1,283 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/decay.h"
4
/* decay_init() on a valid decay_ms must succeed and set readable state. */
TEST_BEGIN(test_decay_init) {
	decay_t decay;
	memset(&decay, 0, sizeof(decay));

	nstime_t curtime;
	nstime_init(&curtime, 0);

	ssize_t decay_ms = 1000;
	assert_true(decay_ms_valid(decay_ms), "");

	expect_false(decay_init(&decay, &curtime, decay_ms),
	    "Failed to initialize decay");
	expect_zd_eq(decay_ms_read(&decay), decay_ms,
	    "Decay_ms was initialized incorrectly");
	expect_u64_ne(decay_epoch_duration_ns(&decay), 0,
	    "Epoch duration was initialized incorrectly");
}
TEST_END
23
/*
 * decay_ms_valid(): -1 means "never decay" and is valid; other negatives
 * and values overflowing NSTIME_SEC_MAX seconds are invalid.
 */
TEST_BEGIN(test_decay_ms_valid) {
	expect_false(decay_ms_valid(-7),
	    "Misclassified negative decay as valid");
	expect_true(decay_ms_valid(-1),
	    "Misclassified -1 (never decay) as invalid decay");
	expect_true(decay_ms_valid(8943),
	    "Misclassified valid decay");
	if (SSIZE_MAX > NSTIME_SEC_MAX) {
		expect_false(
		    decay_ms_valid((ssize_t)(NSTIME_SEC_MAX * KQU(1000) + 39)),
		    "Misclassified too large decay");
	}
}
TEST_END
38
/*
 * decay_npages_purge_in(): all new pages decay within decay_ms, none decay
 * instantly, and half decay within half the period (linear-ish schedule).
 */
TEST_BEGIN(test_decay_npages_purge_in) {
	decay_t decay;
	memset(&decay, 0, sizeof(decay));

	nstime_t curtime;
	nstime_init(&curtime, 0);

	uint64_t decay_ms = 1000;
	nstime_t decay_nstime;
	nstime_init(&decay_nstime, decay_ms * 1000 * 1000);
	expect_false(decay_init(&decay, &curtime, (ssize_t)decay_ms),
	    "Failed to initialize decay");

	size_t new_pages = 100;

	nstime_t time;
	nstime_copy(&time, &decay_nstime);
	expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages),
	    new_pages, "Not all pages are expected to decay in decay_ms");

	nstime_init(&time, 0);
	expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages), 0,
	    "More than zero pages are expected to instantly decay");

	nstime_copy(&time, &decay_nstime);
	nstime_idivide(&time, 2);
	expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages),
	    new_pages / 2, "Not half of pages decay in half the decay period");
}
TEST_END
69
/*
 * decay_maybe_advance_epoch(): no advance at time zero or after a single
 * epoch interval; advances once two intervals have elapsed.
 */
TEST_BEGIN(test_decay_maybe_advance_epoch) {
	decay_t decay;
	memset(&decay, 0, sizeof(decay));

	nstime_t curtime;
	nstime_init(&curtime, 0);

	uint64_t decay_ms = 1000;

	bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
	expect_false(err, "");

	bool advanced;
	advanced = decay_maybe_advance_epoch(&decay, &curtime, 0);
	expect_false(advanced, "Epoch advanced while time didn't");

	nstime_t interval;
	nstime_init(&interval, decay_epoch_duration_ns(&decay));

	nstime_add(&curtime, &interval);
	advanced = decay_maybe_advance_epoch(&decay, &curtime, 0);
	expect_false(advanced, "Epoch advanced after first interval");

	nstime_add(&curtime, &interval);
	advanced = decay_maybe_advance_epoch(&decay, &curtime, 0);
	expect_true(advanced, "Epoch didn't advance after two intervals");
}
TEST_END
98
/* If we never have any decaying pages, npages_limit should stay 0. */
TEST_BEGIN(test_decay_empty) {
	decay_t decay;
	memset(&decay, 0, sizeof(decay));

	nstime_t curtime;
	nstime_init(&curtime, 0);

	uint64_t decay_ms = 1000;
	uint64_t decay_ns = decay_ms * 1000 * 1000;

	bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
	assert_false(err, "");

	/* Poll well below epoch granularity, over many decay periods. */
	uint64_t time_between_calls = decay_epoch_duration_ns(&decay) / 5;
	int nepochs = 0;
	for (uint64_t i = 0; i < decay_ns / time_between_calls * 10; i++) {
		size_t dirty_pages = 0;
		nstime_init(&curtime, i * time_between_calls);
		bool epoch_advanced = decay_maybe_advance_epoch(&decay,
		    &curtime, dirty_pages);
		if (epoch_advanced) {
			nepochs++;
			expect_zu_eq(decay_npages_limit_get(&decay), 0,
			    "Unexpectedly increased npages_limit");
		}
	}
	expect_d_gt(nepochs, 0, "Epochs never advanced");
}
TEST_END
129
130/*
131 * Verify that npages_limit correctly decays as the time goes.
132 *
133 * During first 'nepoch_init' epochs, add new dirty pages.
134 * After that, let them decay and verify npages_limit decreases.
135 * Then proceed with another 'nepoch_init' epochs and check that
136 * all dirty pages are flushed out of backlog, bringing npages_limit
137 * down to zero.
138 */
139TEST_BEGIN(test_decay) {
140 const uint64_t nepoch_init = 10;
141
142 decay_t decay;
143 memset(&decay, 0, sizeof(decay));
144
145 nstime_t curtime;
146 nstime_init(&curtime, 0);
147
148 uint64_t decay_ms = 1000;
149 uint64_t decay_ns = decay_ms * 1000 * 1000;
150
151 bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
152 assert_false(err, "");
153
154 expect_zu_eq(decay_npages_limit_get(&decay), 0,
155 "Empty decay returned nonzero npages_limit");
156
157 nstime_t epochtime;
158 nstime_init(&epochtime, decay_epoch_duration_ns(&decay));
159
160 const size_t dirty_pages_per_epoch = 1000;
161 size_t dirty_pages = 0;
162 uint64_t epoch_ns = decay_epoch_duration_ns(&decay);
163 bool epoch_advanced = false;
164
165 /* Populate backlog with some dirty pages */
166 for (uint64_t i = 0; i < nepoch_init; i++) {
167 nstime_add(&curtime, &epochtime);
168 dirty_pages += dirty_pages_per_epoch;
169 epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
170 dirty_pages);
171 }
172 expect_true(epoch_advanced, "Epoch never advanced");
173
174 size_t npages_limit = decay_npages_limit_get(&decay);
175 expect_zu_gt(npages_limit, 0, "npages_limit is incorrectly equal "
176 "to zero after dirty pages have been added");
177
178 /* Keep dirty pages unchanged and verify that npages_limit decreases */
179 for (uint64_t i = nepoch_init; i * epoch_ns < decay_ns; ++i) {
180 nstime_add(&curtime, &epochtime);
181 epoch_advanced = decay_maybe_advance_epoch(&decay, &curtime,
182 dirty_pages);
183 if (epoch_advanced) {
184 size_t npages_limit_new = decay_npages_limit_get(&decay);
185 expect_zu_lt(npages_limit_new, npages_limit,
186 "napges_limit failed to decay");
187
188 npages_limit = npages_limit_new;
189 }
190 }
191
192 expect_zu_gt(npages_limit, 0, "npages_limit decayed to zero earlier "
193 "than decay_ms since last dirty page was added");
194
195 /* Completely push all dirty pages out of the backlog */
196 epoch_advanced = false;
197 for (uint64_t i = 0; i < nepoch_init; i++) {
198 nstime_add(&curtime, &epochtime);
199 epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
200 dirty_pages);
201 }
202 expect_true(epoch_advanced, "Epoch never advanced");
203
204 npages_limit = decay_npages_limit_get(&decay);
205 expect_zu_eq(npages_limit, 0, "npages_limit didn't decay to 0 after "
206 "decay_ms since last bump in dirty pages");
207}
208TEST_END
209
/*
 * decay_ns_until_purge(): unbounded wait for a zero threshold, at least a
 * full decay period to purge everything, and — after actually waiting the
 * predicted half-purge time — roughly half the pages past the limit
 * (deviation within +/- 50% of the threshold).
 */
TEST_BEGIN(test_decay_ns_until_purge) {
	const uint64_t nepoch_init = 10;

	decay_t decay;
	memset(&decay, 0, sizeof(decay));

	nstime_t curtime;
	nstime_init(&curtime, 0);

	uint64_t decay_ms = 1000;
	uint64_t decay_ns = decay_ms * 1000 * 1000;

	bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
	assert_false(err, "");

	nstime_t epochtime;
	nstime_init(&epochtime, decay_epoch_duration_ns(&decay));

	uint64_t ns_until_purge_empty = decay_ns_until_purge(&decay, 0, 0);
	expect_u64_eq(ns_until_purge_empty, DECAY_UNBOUNDED_TIME_TO_PURGE,
	    "Failed to return unbounded wait time for zero threshold");

	/* Seed the backlog with dirty pages across several epochs. */
	const size_t dirty_pages_per_epoch = 1000;
	size_t dirty_pages = 0;
	bool epoch_advanced = false;
	for (uint64_t i = 0; i < nepoch_init; i++) {
		nstime_add(&curtime, &epochtime);
		dirty_pages += dirty_pages_per_epoch;
		epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
		    dirty_pages);
	}
	expect_true(epoch_advanced, "Epoch never advanced");

	uint64_t ns_until_purge_all = decay_ns_until_purge(&decay,
	    dirty_pages, dirty_pages);
	expect_u64_ge(ns_until_purge_all, decay_ns,
	    "Incorrectly calculated time to purge all pages");

	uint64_t ns_until_purge_none = decay_ns_until_purge(&decay,
	    dirty_pages, 0);
	expect_u64_eq(ns_until_purge_none, decay_epoch_duration_ns(&decay) * 2,
	    "Incorrectly calculated time to purge 0 pages");

	uint64_t npages_threshold = dirty_pages / 2;
	uint64_t ns_until_purge_half = decay_ns_until_purge(&decay,
	    dirty_pages, npages_threshold);

	/* Advance the clock by the predicted half-purge wait. */
	nstime_t waittime;
	nstime_init(&waittime, ns_until_purge_half);
	nstime_add(&curtime, &waittime);

	decay_maybe_advance_epoch(&decay, &curtime, dirty_pages);
	size_t npages_limit = decay_npages_limit_get(&decay);
	expect_zu_lt(npages_limit, dirty_pages,
	    "npages_limit failed to decrease after waiting");
	size_t expected = dirty_pages - npages_limit;
	int deviation = abs((int)expected - (int)(npages_threshold));
	expect_d_lt(deviation, (int)(npages_threshold / 2),
	    "After waiting, number of pages is out of the expected interval "
	    "[0.5 * npages_threshold .. 1.5 * npages_threshold]");
}
TEST_END
272
/* Register and run the decay tests via the jemalloc test harness. */
int
main(void) {
	return test(
	    test_decay_init,
	    test_decay_ms_valid,
	    test_decay_npages_purge_in,
	    test_decay_maybe_advance_epoch,
	    test_decay_empty,
	    test_decay,
	    test_decay_ns_until_purge);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/div.c b/examples/redis-unstable/deps/jemalloc/test/unit/div.c
deleted file mode 100644
index 29aea66..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/div.c
+++ /dev/null
@@ -1,29 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/div.h"
4
5TEST_BEGIN(test_div_exhaustive) {
6 for (size_t divisor = 2; divisor < 1000 * 1000; ++divisor) {
7 div_info_t div_info;
8 div_init(&div_info, divisor);
9 size_t max = 1000 * divisor;
10 if (max < 1000 * 1000) {
11 max = 1000 * 1000;
12 }
13 for (size_t dividend = 0; dividend < 1000 * divisor;
14 dividend += divisor) {
15 size_t quotient = div_compute(
16 &div_info, dividend);
17 expect_zu_eq(dividend, quotient * divisor,
18 "With divisor = %zu, dividend = %zu, "
19 "got quotient %zu", divisor, dividend, quotient);
20 }
21 }
22}
23TEST_END
24
25int
26main(void) {
27 return test_no_reentrancy(
28 test_div_exhaustive);
29}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/double_free.c b/examples/redis-unstable/deps/jemalloc/test/unit/double_free.c
deleted file mode 100644
index 12122c1..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/double_free.c
+++ /dev/null
@@ -1,77 +0,0 @@
1#include "test/jemalloc_test.h"
2#include "test/san.h"
3
4#include "jemalloc/internal/safety_check.h"
5
6bool fake_abort_called;
7void fake_abort(const char *message) {
8 (void)message;
9 fake_abort_called = true;
10}
11
12void
13test_large_double_free_pre(void) {
14 safety_check_set_abort(&fake_abort);
15 fake_abort_called = false;
16}
17
18void
19test_large_double_free_post() {
20 expect_b_eq(fake_abort_called, true, "Double-free check didn't fire.");
21 safety_check_set_abort(NULL);
22}
23
24TEST_BEGIN(test_large_double_free_tcache) {
25 test_skip_if(!config_opt_safety_checks);
26 /*
27 * Skip debug builds, since too many assertions will be triggered with
28 * double-free before hitting the one we are interested in.
29 */
30 test_skip_if(config_debug);
31
32 test_large_double_free_pre();
33 char *ptr = malloc(SC_LARGE_MINCLASS);
34 bool guarded = extent_is_guarded(tsdn_fetch(), ptr);
35 free(ptr);
36 if (!guarded) {
37 free(ptr);
38 } else {
39 /*
40 * Skip because guarded extents may unguard immediately on
41 * deallocation, in which case the second free will crash before
42 * reaching the intended safety check.
43 */
44 fake_abort_called = true;
45 }
46 mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
47 test_large_double_free_post();
48}
49TEST_END
50
51TEST_BEGIN(test_large_double_free_no_tcache) {
52 test_skip_if(!config_opt_safety_checks);
53 test_skip_if(config_debug);
54
55 test_large_double_free_pre();
56 char *ptr = mallocx(SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
57 bool guarded = extent_is_guarded(tsdn_fetch(), ptr);
58 dallocx(ptr, MALLOCX_TCACHE_NONE);
59 if (!guarded) {
60 dallocx(ptr, MALLOCX_TCACHE_NONE);
61 } else {
62 /*
63 * Skip because guarded extents may unguard immediately on
64 * deallocation, in which case the second free will crash before
65 * reaching the intended safety check.
66 */
67 fake_abort_called = true;
68 }
69 test_large_double_free_post();
70}
71TEST_END
72
73int
74main(void) {
75 return test(test_large_double_free_no_tcache,
76 test_large_double_free_tcache);
77}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/double_free.h b/examples/redis-unstable/deps/jemalloc/test/unit/double_free.h
deleted file mode 100644
index 8b13789..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/double_free.h
+++ /dev/null
@@ -1 +0,0 @@
1
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/edata_cache.c b/examples/redis-unstable/deps/jemalloc/test/unit/edata_cache.c
deleted file mode 100644
index af1110a..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/edata_cache.c
+++ /dev/null
@@ -1,226 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/edata_cache.h"
4
5static void
6test_edata_cache_init(edata_cache_t *edata_cache) {
7 base_t *base = base_new(TSDN_NULL, /* ind */ 1,
8 &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
9 assert_ptr_not_null(base, "");
10 bool err = edata_cache_init(edata_cache, base);
11 assert_false(err, "");
12}
13
14static void
15test_edata_cache_destroy(edata_cache_t *edata_cache) {
16 base_delete(TSDN_NULL, edata_cache->base);
17}
18
19TEST_BEGIN(test_edata_cache) {
20 edata_cache_t ec;
21 test_edata_cache_init(&ec);
22
23 /* Get one */
24 edata_t *ed1 = edata_cache_get(TSDN_NULL, &ec);
25 assert_ptr_not_null(ed1, "");
26
27 /* Cache should be empty */
28 assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
29
30 /* Get another */
31 edata_t *ed2 = edata_cache_get(TSDN_NULL, &ec);
32 assert_ptr_not_null(ed2, "");
33
34 /* Still empty */
35 assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
36
37 /* Put one back, and the cache should now have one item */
38 edata_cache_put(TSDN_NULL, &ec, ed1);
39 assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 1, "");
40
41 /* Reallocating should reuse the item, and leave an empty cache. */
42 edata_t *ed1_again = edata_cache_get(TSDN_NULL, &ec);
43 assert_ptr_eq(ed1, ed1_again, "");
44 assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
45
46 test_edata_cache_destroy(&ec);
47}
48TEST_END
49
50static size_t
51ecf_count(edata_cache_fast_t *ecf) {
52 size_t count = 0;
53 edata_t *cur;
54 ql_foreach(cur, &ecf->list.head, ql_link_inactive) {
55 count++;
56 }
57 return count;
58}
59
60TEST_BEGIN(test_edata_cache_fast_simple) {
61 edata_cache_t ec;
62 edata_cache_fast_t ecf;
63
64 test_edata_cache_init(&ec);
65 edata_cache_fast_init(&ecf, &ec);
66
67 edata_t *ed1 = edata_cache_fast_get(TSDN_NULL, &ecf);
68 expect_ptr_not_null(ed1, "");
69 expect_zu_eq(ecf_count(&ecf), 0, "");
70 expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
71
72 edata_t *ed2 = edata_cache_fast_get(TSDN_NULL, &ecf);
73 expect_ptr_not_null(ed2, "");
74 expect_zu_eq(ecf_count(&ecf), 0, "");
75 expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
76
77 edata_cache_fast_put(TSDN_NULL, &ecf, ed1);
78 expect_zu_eq(ecf_count(&ecf), 1, "");
79 expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
80
81 edata_cache_fast_put(TSDN_NULL, &ecf, ed2);
82 expect_zu_eq(ecf_count(&ecf), 2, "");
83 expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
84
85 /* LIFO ordering. */
86 expect_ptr_eq(ed2, edata_cache_fast_get(TSDN_NULL, &ecf), "");
87 expect_zu_eq(ecf_count(&ecf), 1, "");
88 expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
89
90 expect_ptr_eq(ed1, edata_cache_fast_get(TSDN_NULL, &ecf), "");
91 expect_zu_eq(ecf_count(&ecf), 0, "");
92 expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
93
94 test_edata_cache_destroy(&ec);
95}
96TEST_END
97
98TEST_BEGIN(test_edata_cache_fill) {
99 edata_cache_t ec;
100 edata_cache_fast_t ecf;
101
102 test_edata_cache_init(&ec);
103 edata_cache_fast_init(&ecf, &ec);
104
105 edata_t *allocs[EDATA_CACHE_FAST_FILL * 2];
106
107 /*
108 * If the fallback cache can't satisfy the request, we shouldn't do
109 * extra allocations until compelled to. Put half the fill goal in the
110 * fallback.
111 */
112 for (int i = 0; i < EDATA_CACHE_FAST_FILL / 2; i++) {
113 allocs[i] = edata_cache_get(TSDN_NULL, &ec);
114 }
115 for (int i = 0; i < EDATA_CACHE_FAST_FILL / 2; i++) {
116 edata_cache_put(TSDN_NULL, &ec, allocs[i]);
117 }
118 expect_zu_eq(EDATA_CACHE_FAST_FILL / 2,
119 atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
120
121 allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
122 expect_zu_eq(EDATA_CACHE_FAST_FILL / 2 - 1, ecf_count(&ecf),
123 "Should have grabbed all edatas available but no more.");
124
125 for (int i = 1; i < EDATA_CACHE_FAST_FILL / 2; i++) {
126 allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
127 expect_ptr_not_null(allocs[i], "");
128 }
129 expect_zu_eq(0, ecf_count(&ecf), "");
130
131 /* When forced, we should alloc from the base. */
132 edata_t *edata = edata_cache_fast_get(TSDN_NULL, &ecf);
133 expect_ptr_not_null(edata, "");
134 expect_zu_eq(0, ecf_count(&ecf), "Allocated more than necessary");
135 expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED),
136 "Allocated more than necessary");
137
138 /*
139 * We should correctly fill in the common case where the fallback isn't
140 * exhausted, too.
141 */
142 for (int i = 0; i < EDATA_CACHE_FAST_FILL * 2; i++) {
143 allocs[i] = edata_cache_get(TSDN_NULL, &ec);
144 expect_ptr_not_null(allocs[i], "");
145 }
146 for (int i = 0; i < EDATA_CACHE_FAST_FILL * 2; i++) {
147 edata_cache_put(TSDN_NULL, &ec, allocs[i]);
148 }
149
150 allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
151 expect_zu_eq(EDATA_CACHE_FAST_FILL - 1, ecf_count(&ecf), "");
152 expect_zu_eq(EDATA_CACHE_FAST_FILL,
153 atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
154 for (int i = 1; i < EDATA_CACHE_FAST_FILL; i++) {
155 expect_zu_eq(EDATA_CACHE_FAST_FILL - i, ecf_count(&ecf), "");
156 expect_zu_eq(EDATA_CACHE_FAST_FILL,
157 atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
158 allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
159 expect_ptr_not_null(allocs[i], "");
160 }
161 expect_zu_eq(0, ecf_count(&ecf), "");
162 expect_zu_eq(EDATA_CACHE_FAST_FILL,
163 atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
164
165 allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
166 expect_zu_eq(EDATA_CACHE_FAST_FILL - 1, ecf_count(&ecf), "");
167 expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
168 for (int i = 1; i < EDATA_CACHE_FAST_FILL; i++) {
169 expect_zu_eq(EDATA_CACHE_FAST_FILL - i, ecf_count(&ecf), "");
170 expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
171 allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
172 expect_ptr_not_null(allocs[i], "");
173 }
174 expect_zu_eq(0, ecf_count(&ecf), "");
175 expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
176
177 test_edata_cache_destroy(&ec);
178}
179TEST_END
180
181TEST_BEGIN(test_edata_cache_disable) {
182 edata_cache_t ec;
183 edata_cache_fast_t ecf;
184
185 test_edata_cache_init(&ec);
186 edata_cache_fast_init(&ecf, &ec);
187
188 for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
189 edata_t *edata = edata_cache_get(TSDN_NULL, &ec);
190 expect_ptr_not_null(edata, "");
191 edata_cache_fast_put(TSDN_NULL, &ecf, edata);
192 }
193
194 expect_zu_eq(EDATA_CACHE_FAST_FILL, ecf_count(&ecf), "");
195 expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
196
197 edata_cache_fast_disable(TSDN_NULL, &ecf);
198
199 expect_zu_eq(0, ecf_count(&ecf), "");
200 expect_zu_eq(EDATA_CACHE_FAST_FILL,
201 atomic_load_zu(&ec.count, ATOMIC_RELAXED), "Disabling should flush");
202
203 edata_t *edata = edata_cache_fast_get(TSDN_NULL, &ecf);
204 expect_zu_eq(0, ecf_count(&ecf), "");
205 expect_zu_eq(EDATA_CACHE_FAST_FILL - 1,
206 atomic_load_zu(&ec.count, ATOMIC_RELAXED),
207 "Disabled ecf should forward on get");
208
209 edata_cache_fast_put(TSDN_NULL, &ecf, edata);
210 expect_zu_eq(0, ecf_count(&ecf), "");
211 expect_zu_eq(EDATA_CACHE_FAST_FILL,
212 atomic_load_zu(&ec.count, ATOMIC_RELAXED),
213 "Disabled ecf should forward on put");
214
215 test_edata_cache_destroy(&ec);
216}
217TEST_END
218
219int
220main(void) {
221 return test(
222 test_edata_cache,
223 test_edata_cache_fast_simple,
224 test_edata_cache_fill,
225 test_edata_cache_disable);
226}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/emitter.c b/examples/redis-unstable/deps/jemalloc/test/unit/emitter.c
deleted file mode 100644
index ef8f9ff..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/emitter.c
+++ /dev/null
@@ -1,533 +0,0 @@
1#include "test/jemalloc_test.h"
2#include "jemalloc/internal/emitter.h"
3
4/*
5 * This is so useful for debugging and feature work, we'll leave printing
6 * functionality committed but disabled by default.
7 */
8/* Print the text as it will appear. */
9static bool print_raw = false;
10/* Print the text escaped, so it can be copied back into the test case. */
11static bool print_escaped = false;
12
13typedef struct buf_descriptor_s buf_descriptor_t;
14struct buf_descriptor_s {
15 char *buf;
16 size_t len;
17 bool mid_quote;
18};
19
20/*
21 * Forwards all writes to the passed-in buf_v (which should be cast from a
22 * buf_descriptor_t *).
23 */
24static void
25forwarding_cb(void *buf_descriptor_v, const char *str) {
26 buf_descriptor_t *buf_descriptor = (buf_descriptor_t *)buf_descriptor_v;
27
28 if (print_raw) {
29 malloc_printf("%s", str);
30 }
31 if (print_escaped) {
32 const char *it = str;
33 while (*it != '\0') {
34 if (!buf_descriptor->mid_quote) {
35 malloc_printf("\"");
36 buf_descriptor->mid_quote = true;
37 }
38 switch (*it) {
39 case '\\':
40 malloc_printf("\\");
41 break;
42 case '\"':
43 malloc_printf("\\\"");
44 break;
45 case '\t':
46 malloc_printf("\\t");
47 break;
48 case '\n':
49 malloc_printf("\\n\"\n");
50 buf_descriptor->mid_quote = false;
51 break;
52 default:
53 malloc_printf("%c", *it);
54 }
55 it++;
56 }
57 }
58
59 size_t written = malloc_snprintf(buf_descriptor->buf,
60 buf_descriptor->len, "%s", str);
61 expect_zu_eq(written, strlen(str), "Buffer overflow!");
62 buf_descriptor->buf += written;
63 buf_descriptor->len -= written;
64 expect_zu_gt(buf_descriptor->len, 0, "Buffer out of space!");
65}
66
67static void
68expect_emit_output(void (*emit_fn)(emitter_t *),
69 const char *expected_json_output,
70 const char *expected_json_compact_output,
71 const char *expected_table_output) {
72 emitter_t emitter;
73 char buf[MALLOC_PRINTF_BUFSIZE];
74 buf_descriptor_t buf_descriptor;
75
76 buf_descriptor.buf = buf;
77 buf_descriptor.len = MALLOC_PRINTF_BUFSIZE;
78 buf_descriptor.mid_quote = false;
79
80 emitter_init(&emitter, emitter_output_json, &forwarding_cb,
81 &buf_descriptor);
82 (*emit_fn)(&emitter);
83 expect_str_eq(expected_json_output, buf, "json output failure");
84
85 buf_descriptor.buf = buf;
86 buf_descriptor.len = MALLOC_PRINTF_BUFSIZE;
87 buf_descriptor.mid_quote = false;
88
89 emitter_init(&emitter, emitter_output_json_compact, &forwarding_cb,
90 &buf_descriptor);
91 (*emit_fn)(&emitter);
92 expect_str_eq(expected_json_compact_output, buf,
93 "compact json output failure");
94
95 buf_descriptor.buf = buf;
96 buf_descriptor.len = MALLOC_PRINTF_BUFSIZE;
97 buf_descriptor.mid_quote = false;
98
99 emitter_init(&emitter, emitter_output_table, &forwarding_cb,
100 &buf_descriptor);
101 (*emit_fn)(&emitter);
102 expect_str_eq(expected_table_output, buf, "table output failure");
103}
104
105static void
106emit_dict(emitter_t *emitter) {
107 bool b_false = false;
108 bool b_true = true;
109 int i_123 = 123;
110 const char *str = "a string";
111
112 emitter_begin(emitter);
113 emitter_dict_begin(emitter, "foo", "This is the foo table:");
114 emitter_kv(emitter, "abc", "ABC", emitter_type_bool, &b_false);
115 emitter_kv(emitter, "def", "DEF", emitter_type_bool, &b_true);
116 emitter_kv_note(emitter, "ghi", "GHI", emitter_type_int, &i_123,
117 "note_key1", emitter_type_string, &str);
118 emitter_kv_note(emitter, "jkl", "JKL", emitter_type_string, &str,
119 "note_key2", emitter_type_bool, &b_false);
120 emitter_dict_end(emitter);
121 emitter_end(emitter);
122}
123
124static const char *dict_json =
125"{\n"
126"\t\"foo\": {\n"
127"\t\t\"abc\": false,\n"
128"\t\t\"def\": true,\n"
129"\t\t\"ghi\": 123,\n"
130"\t\t\"jkl\": \"a string\"\n"
131"\t}\n"
132"}\n";
133static const char *dict_json_compact =
134"{"
135 "\"foo\":{"
136 "\"abc\":false,"
137 "\"def\":true,"
138 "\"ghi\":123,"
139 "\"jkl\":\"a string\""
140 "}"
141"}";
142static const char *dict_table =
143"This is the foo table:\n"
144" ABC: false\n"
145" DEF: true\n"
146" GHI: 123 (note_key1: \"a string\")\n"
147" JKL: \"a string\" (note_key2: false)\n";
148
149static void
150emit_table_printf(emitter_t *emitter) {
151 emitter_begin(emitter);
152 emitter_table_printf(emitter, "Table note 1\n");
153 emitter_table_printf(emitter, "Table note 2 %s\n",
154 "with format string");
155 emitter_end(emitter);
156}
157
158static const char *table_printf_json =
159"{\n"
160"}\n";
161static const char *table_printf_json_compact = "{}";
162static const char *table_printf_table =
163"Table note 1\n"
164"Table note 2 with format string\n";
165
166static void emit_nested_dict(emitter_t *emitter) {
167 int val = 123;
168 emitter_begin(emitter);
169 emitter_dict_begin(emitter, "json1", "Dict 1");
170 emitter_dict_begin(emitter, "json2", "Dict 2");
171 emitter_kv(emitter, "primitive", "A primitive", emitter_type_int, &val);
172 emitter_dict_end(emitter); /* Close 2 */
173 emitter_dict_begin(emitter, "json3", "Dict 3");
174 emitter_dict_end(emitter); /* Close 3 */
175 emitter_dict_end(emitter); /* Close 1 */
176 emitter_dict_begin(emitter, "json4", "Dict 4");
177 emitter_kv(emitter, "primitive", "Another primitive",
178 emitter_type_int, &val);
179 emitter_dict_end(emitter); /* Close 4 */
180 emitter_end(emitter);
181}
182
183static const char *nested_dict_json =
184"{\n"
185"\t\"json1\": {\n"
186"\t\t\"json2\": {\n"
187"\t\t\t\"primitive\": 123\n"
188"\t\t},\n"
189"\t\t\"json3\": {\n"
190"\t\t}\n"
191"\t},\n"
192"\t\"json4\": {\n"
193"\t\t\"primitive\": 123\n"
194"\t}\n"
195"}\n";
196static const char *nested_dict_json_compact =
197"{"
198 "\"json1\":{"
199 "\"json2\":{"
200 "\"primitive\":123"
201 "},"
202 "\"json3\":{"
203 "}"
204 "},"
205 "\"json4\":{"
206 "\"primitive\":123"
207 "}"
208"}";
209static const char *nested_dict_table =
210"Dict 1\n"
211" Dict 2\n"
212" A primitive: 123\n"
213" Dict 3\n"
214"Dict 4\n"
215" Another primitive: 123\n";
216
217static void
218emit_types(emitter_t *emitter) {
219 bool b = false;
220 int i = -123;
221 unsigned u = 123;
222 ssize_t zd = -456;
223 size_t zu = 456;
224 const char *str = "string";
225 uint32_t u32 = 789;
226 uint64_t u64 = 10000000000ULL;
227
228 emitter_begin(emitter);
229 emitter_kv(emitter, "k1", "K1", emitter_type_bool, &b);
230 emitter_kv(emitter, "k2", "K2", emitter_type_int, &i);
231 emitter_kv(emitter, "k3", "K3", emitter_type_unsigned, &u);
232 emitter_kv(emitter, "k4", "K4", emitter_type_ssize, &zd);
233 emitter_kv(emitter, "k5", "K5", emitter_type_size, &zu);
234 emitter_kv(emitter, "k6", "K6", emitter_type_string, &str);
235 emitter_kv(emitter, "k7", "K7", emitter_type_uint32, &u32);
236 emitter_kv(emitter, "k8", "K8", emitter_type_uint64, &u64);
237 /*
238 * We don't test the title type, since it's only used for tables. It's
239 * tested in the emitter_table_row tests.
240 */
241 emitter_end(emitter);
242}
243
244static const char *types_json =
245"{\n"
246"\t\"k1\": false,\n"
247"\t\"k2\": -123,\n"
248"\t\"k3\": 123,\n"
249"\t\"k4\": -456,\n"
250"\t\"k5\": 456,\n"
251"\t\"k6\": \"string\",\n"
252"\t\"k7\": 789,\n"
253"\t\"k8\": 10000000000\n"
254"}\n";
255static const char *types_json_compact =
256"{"
257 "\"k1\":false,"
258 "\"k2\":-123,"
259 "\"k3\":123,"
260 "\"k4\":-456,"
261 "\"k5\":456,"
262 "\"k6\":\"string\","
263 "\"k7\":789,"
264 "\"k8\":10000000000"
265"}";
266static const char *types_table =
267"K1: false\n"
268"K2: -123\n"
269"K3: 123\n"
270"K4: -456\n"
271"K5: 456\n"
272"K6: \"string\"\n"
273"K7: 789\n"
274"K8: 10000000000\n";
275
276static void
277emit_modal(emitter_t *emitter) {
278 int val = 123;
279 emitter_begin(emitter);
280 emitter_dict_begin(emitter, "j0", "T0");
281 emitter_json_key(emitter, "j1");
282 emitter_json_object_begin(emitter);
283 emitter_kv(emitter, "i1", "I1", emitter_type_int, &val);
284 emitter_json_kv(emitter, "i2", emitter_type_int, &val);
285 emitter_table_kv(emitter, "I3", emitter_type_int, &val);
286 emitter_table_dict_begin(emitter, "T1");
287 emitter_kv(emitter, "i4", "I4", emitter_type_int, &val);
288 emitter_json_object_end(emitter); /* Close j1 */
289 emitter_kv(emitter, "i5", "I5", emitter_type_int, &val);
290 emitter_table_dict_end(emitter); /* Close T1 */
291 emitter_kv(emitter, "i6", "I6", emitter_type_int, &val);
292 emitter_dict_end(emitter); /* Close j0 / T0 */
293 emitter_end(emitter);
294}
295
296const char *modal_json =
297"{\n"
298"\t\"j0\": {\n"
299"\t\t\"j1\": {\n"
300"\t\t\t\"i1\": 123,\n"
301"\t\t\t\"i2\": 123,\n"
302"\t\t\t\"i4\": 123\n"
303"\t\t},\n"
304"\t\t\"i5\": 123,\n"
305"\t\t\"i6\": 123\n"
306"\t}\n"
307"}\n";
308const char *modal_json_compact =
309"{"
310 "\"j0\":{"
311 "\"j1\":{"
312 "\"i1\":123,"
313 "\"i2\":123,"
314 "\"i4\":123"
315 "},"
316 "\"i5\":123,"
317 "\"i6\":123"
318 "}"
319"}";
320const char *modal_table =
321"T0\n"
322" I1: 123\n"
323" I3: 123\n"
324" T1\n"
325" I4: 123\n"
326" I5: 123\n"
327" I6: 123\n";
328
329static void
330emit_json_array(emitter_t *emitter) {
331 int ival = 123;
332
333 emitter_begin(emitter);
334 emitter_json_key(emitter, "dict");
335 emitter_json_object_begin(emitter);
336 emitter_json_key(emitter, "arr");
337 emitter_json_array_begin(emitter);
338 emitter_json_object_begin(emitter);
339 emitter_json_kv(emitter, "foo", emitter_type_int, &ival);
340 emitter_json_object_end(emitter); /* Close arr[0] */
341 /* arr[1] and arr[2] are primitives. */
342 emitter_json_value(emitter, emitter_type_int, &ival);
343 emitter_json_value(emitter, emitter_type_int, &ival);
344 emitter_json_object_begin(emitter);
345 emitter_json_kv(emitter, "bar", emitter_type_int, &ival);
346 emitter_json_kv(emitter, "baz", emitter_type_int, &ival);
347 emitter_json_object_end(emitter); /* Close arr[3]. */
348 emitter_json_array_end(emitter); /* Close arr. */
349 emitter_json_object_end(emitter); /* Close dict. */
350 emitter_end(emitter);
351}
352
353static const char *json_array_json =
354"{\n"
355"\t\"dict\": {\n"
356"\t\t\"arr\": [\n"
357"\t\t\t{\n"
358"\t\t\t\t\"foo\": 123\n"
359"\t\t\t},\n"
360"\t\t\t123,\n"
361"\t\t\t123,\n"
362"\t\t\t{\n"
363"\t\t\t\t\"bar\": 123,\n"
364"\t\t\t\t\"baz\": 123\n"
365"\t\t\t}\n"
366"\t\t]\n"
367"\t}\n"
368"}\n";
369static const char *json_array_json_compact =
370"{"
371 "\"dict\":{"
372 "\"arr\":["
373 "{"
374 "\"foo\":123"
375 "},"
376 "123,"
377 "123,"
378 "{"
379 "\"bar\":123,"
380 "\"baz\":123"
381 "}"
382 "]"
383 "}"
384"}";
385static const char *json_array_table = "";
386
387static void
388emit_json_nested_array(emitter_t *emitter) {
389 int ival = 123;
390 char *sval = "foo";
391 emitter_begin(emitter);
392 emitter_json_array_begin(emitter);
393 emitter_json_array_begin(emitter);
394 emitter_json_value(emitter, emitter_type_int, &ival);
395 emitter_json_value(emitter, emitter_type_string, &sval);
396 emitter_json_value(emitter, emitter_type_int, &ival);
397 emitter_json_value(emitter, emitter_type_string, &sval);
398 emitter_json_array_end(emitter);
399 emitter_json_array_begin(emitter);
400 emitter_json_value(emitter, emitter_type_int, &ival);
401 emitter_json_array_end(emitter);
402 emitter_json_array_begin(emitter);
403 emitter_json_value(emitter, emitter_type_string, &sval);
404 emitter_json_value(emitter, emitter_type_int, &ival);
405 emitter_json_array_end(emitter);
406 emitter_json_array_begin(emitter);
407 emitter_json_array_end(emitter);
408 emitter_json_array_end(emitter);
409 emitter_end(emitter);
410}
411
412static const char *json_nested_array_json =
413"{\n"
414"\t[\n"
415"\t\t[\n"
416"\t\t\t123,\n"
417"\t\t\t\"foo\",\n"
418"\t\t\t123,\n"
419"\t\t\t\"foo\"\n"
420"\t\t],\n"
421"\t\t[\n"
422"\t\t\t123\n"
423"\t\t],\n"
424"\t\t[\n"
425"\t\t\t\"foo\",\n"
426"\t\t\t123\n"
427"\t\t],\n"
428"\t\t[\n"
429"\t\t]\n"
430"\t]\n"
431"}\n";
432static const char *json_nested_array_json_compact =
433"{"
434 "["
435 "["
436 "123,"
437 "\"foo\","
438 "123,"
439 "\"foo\""
440 "],"
441 "["
442 "123"
443 "],"
444 "["
445 "\"foo\","
446 "123"
447 "],"
448 "["
449 "]"
450 "]"
451"}";
452static const char *json_nested_array_table = "";
453
454static void
455emit_table_row(emitter_t *emitter) {
456 emitter_begin(emitter);
457 emitter_row_t row;
458 emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title, {0}, {0, 0}};
459 abc.str_val = "ABC title";
460 emitter_col_t def = {emitter_justify_right, 15, emitter_type_title, {0}, {0, 0}};
461 def.str_val = "DEF title";
462 emitter_col_t ghi = {emitter_justify_right, 5, emitter_type_title, {0}, {0, 0}};
463 ghi.str_val = "GHI";
464
465 emitter_row_init(&row);
466 emitter_col_init(&abc, &row);
467 emitter_col_init(&def, &row);
468 emitter_col_init(&ghi, &row);
469
470 emitter_table_row(emitter, &row);
471
472 abc.type = emitter_type_int;
473 def.type = emitter_type_bool;
474 ghi.type = emitter_type_int;
475
476 abc.int_val = 123;
477 def.bool_val = true;
478 ghi.int_val = 456;
479 emitter_table_row(emitter, &row);
480
481 abc.int_val = 789;
482 def.bool_val = false;
483 ghi.int_val = 1011;
484 emitter_table_row(emitter, &row);
485
486 abc.type = emitter_type_string;
487 abc.str_val = "a string";
488 def.bool_val = false;
489 ghi.type = emitter_type_title;
490 ghi.str_val = "ghi";
491 emitter_table_row(emitter, &row);
492
493 emitter_end(emitter);
494}
495
496static const char *table_row_json =
497"{\n"
498"}\n";
499static const char *table_row_json_compact = "{}";
500static const char *table_row_table =
501"ABC title DEF title GHI\n"
502"123 true 456\n"
503"789 false 1011\n"
504"\"a string\" false ghi\n";
505
506#define GENERATE_TEST(feature) \
507TEST_BEGIN(test_##feature) { \
508 expect_emit_output(emit_##feature, feature##_json, \
509 feature##_json_compact, feature##_table); \
510} \
511TEST_END
512
513GENERATE_TEST(dict)
514GENERATE_TEST(table_printf)
515GENERATE_TEST(nested_dict)
516GENERATE_TEST(types)
517GENERATE_TEST(modal)
518GENERATE_TEST(json_array)
519GENERATE_TEST(json_nested_array)
520GENERATE_TEST(table_row)
521
522int
523main(void) {
524 return test_no_reentrancy(
525 test_dict,
526 test_table_printf,
527 test_nested_dict,
528 test_types,
529 test_modal,
530 test_json_array,
531 test_json_nested_array,
532 test_table_row);
533}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/extent_quantize.c b/examples/redis-unstable/deps/jemalloc/test/unit/extent_quantize.c
deleted file mode 100644
index e6bbd53..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/extent_quantize.c
+++ /dev/null
@@ -1,141 +0,0 @@
1#include "test/jemalloc_test.h"
2
3TEST_BEGIN(test_small_extent_size) {
4 unsigned nbins, i;
5 size_t sz, extent_size;
6 size_t mib[4];
7 size_t miblen = sizeof(mib) / sizeof(size_t);
8
9 /*
10 * Iterate over all small size classes, get their extent sizes, and
11 * verify that the quantized size is the same as the extent size.
12 */
13
14 sz = sizeof(unsigned);
15 expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
16 "Unexpected mallctl failure");
17
18 expect_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0,
19 "Unexpected mallctlnametomib failure");
20 for (i = 0; i < nbins; i++) {
21 mib[2] = i;
22 sz = sizeof(size_t);
23 expect_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz,
24 NULL, 0), 0, "Unexpected mallctlbymib failure");
25 expect_zu_eq(extent_size,
26 sz_psz_quantize_floor(extent_size),
27 "Small extent quantization should be a no-op "
28 "(extent_size=%zu)", extent_size);
29 expect_zu_eq(extent_size,
30 sz_psz_quantize_ceil(extent_size),
31 "Small extent quantization should be a no-op "
32 "(extent_size=%zu)", extent_size);
33 }
34}
35TEST_END
36
37TEST_BEGIN(test_large_extent_size) {
38 bool cache_oblivious;
39 unsigned nlextents, i;
40 size_t sz, extent_size_prev, ceil_prev;
41 size_t mib[4];
42 size_t miblen = sizeof(mib) / sizeof(size_t);
43
44 /*
45 * Iterate over all large size classes, get their extent sizes, and
46 * verify that the quantized size is the same as the extent size.
47 */
48
49 sz = sizeof(bool);
50 expect_d_eq(mallctl("opt.cache_oblivious", (void *)&cache_oblivious,
51 &sz, NULL, 0), 0, "Unexpected mallctl failure");
52
53 sz = sizeof(unsigned);
54 expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
55 0), 0, "Unexpected mallctl failure");
56
57 expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
58 "Unexpected mallctlnametomib failure");
59 for (i = 0; i < nlextents; i++) {
60 size_t lextent_size, extent_size, floor, ceil;
61
62 mib[2] = i;
63 sz = sizeof(size_t);
64 expect_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size,
65 &sz, NULL, 0), 0, "Unexpected mallctlbymib failure");
66 extent_size = cache_oblivious ? lextent_size + PAGE :
67 lextent_size;
68 floor = sz_psz_quantize_floor(extent_size);
69 ceil = sz_psz_quantize_ceil(extent_size);
70
71 expect_zu_eq(extent_size, floor,
72 "Extent quantization should be a no-op for precise size "
73 "(lextent_size=%zu, extent_size=%zu)", lextent_size,
74 extent_size);
75 expect_zu_eq(extent_size, ceil,
76 "Extent quantization should be a no-op for precise size "
77 "(lextent_size=%zu, extent_size=%zu)", lextent_size,
78 extent_size);
79
80 if (i > 0) {
81 expect_zu_eq(extent_size_prev,
82 sz_psz_quantize_floor(extent_size - PAGE),
83 "Floor should be a precise size");
84 if (extent_size_prev < ceil_prev) {
85 expect_zu_eq(ceil_prev, extent_size,
86 "Ceiling should be a precise size "
87 "(extent_size_prev=%zu, ceil_prev=%zu, "
88 "extent_size=%zu)", extent_size_prev,
89 ceil_prev, extent_size);
90 }
91 }
92 if (i + 1 < nlextents) {
93 extent_size_prev = floor;
94 ceil_prev = sz_psz_quantize_ceil(extent_size +
95 PAGE);
96 }
97 }
98}
99TEST_END
100
101TEST_BEGIN(test_monotonic) {
102#define SZ_MAX ZU(4 * 1024 * 1024)
103 unsigned i;
104 size_t floor_prev, ceil_prev;
105
106 floor_prev = 0;
107 ceil_prev = 0;
108 for (i = 1; i <= SZ_MAX >> LG_PAGE; i++) {
109 size_t extent_size, floor, ceil;
110
111 extent_size = i << LG_PAGE;
112 floor = sz_psz_quantize_floor(extent_size);
113 ceil = sz_psz_quantize_ceil(extent_size);
114
115 expect_zu_le(floor, extent_size,
116 "Floor should be <= (floor=%zu, extent_size=%zu, ceil=%zu)",
117 floor, extent_size, ceil);
118 expect_zu_ge(ceil, extent_size,
119 "Ceiling should be >= (floor=%zu, extent_size=%zu, "
120 "ceil=%zu)", floor, extent_size, ceil);
121
122 expect_zu_le(floor_prev, floor, "Floor should be monotonic "
123 "(floor_prev=%zu, floor=%zu, extent_size=%zu, ceil=%zu)",
124 floor_prev, floor, extent_size, ceil);
125 expect_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
126 "(floor=%zu, extent_size=%zu, ceil_prev=%zu, ceil=%zu)",
127 floor, extent_size, ceil_prev, ceil);
128
129 floor_prev = floor;
130 ceil_prev = ceil;
131 }
132}
133TEST_END
134
135int
136main(void) {
137 return test(
138 test_small_extent_size,
139 test_large_extent_size,
140 test_monotonic);
141}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/fb.c b/examples/redis-unstable/deps/jemalloc/test/unit/fb.c
deleted file mode 100644
index ad72c75..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/fb.c
+++ /dev/null
@@ -1,954 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/fb.h"
4#include "test/nbits.h"
5
6static void
7do_test_init(size_t nbits) {
8 size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
9 fb_group_t *fb = malloc(sz);
10 /* Junk fb's contents. */
11 memset(fb, 99, sz);
12 fb_init(fb, nbits);
13 for (size_t i = 0; i < nbits; i++) {
14 expect_false(fb_get(fb, nbits, i),
15 "bitmap should start empty");
16 }
17 free(fb);
18}
19
/* Run do_test_init across every bitmap size in NBITS_TAB. */
TEST_BEGIN(test_fb_init) {
#define NB(nbits) \
	do_test_init(nbits);
	NBITS_TAB
#undef NB
}
TEST_END
27
28static void
29do_test_get_set_unset(size_t nbits) {
30 size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
31 fb_group_t *fb = malloc(sz);
32 fb_init(fb, nbits);
33 /* Set the bits divisible by 3. */
34 for (size_t i = 0; i < nbits; i++) {
35 if (i % 3 == 0) {
36 fb_set(fb, nbits, i);
37 }
38 }
39 /* Check them. */
40 for (size_t i = 0; i < nbits; i++) {
41 expect_b_eq(i % 3 == 0, fb_get(fb, nbits, i),
42 "Unexpected bit at position %zu", i);
43 }
44 /* Unset those divisible by 5. */
45 for (size_t i = 0; i < nbits; i++) {
46 if (i % 5 == 0) {
47 fb_unset(fb, nbits, i);
48 }
49 }
50 /* Check them. */
51 for (size_t i = 0; i < nbits; i++) {
52 expect_b_eq(i % 3 == 0 && i % 5 != 0, fb_get(fb, nbits, i),
53 "Unexpected bit at position %zu", i);
54 }
55 free(fb);
56}
57
/* Run do_test_get_set_unset across every bitmap size in NBITS_TAB. */
TEST_BEGIN(test_get_set_unset) {
#define NB(nbits) \
	do_test_get_set_unset(nbits);
	NBITS_TAB
#undef NB
}
TEST_END
65
/*
 * Reference search: starting from i, scan (forward or backward) for the
 * first position whose expected value -- set iff divisible by 3 or 5 --
 * equals `bit`.  Misses return nbits (forward) or -1 (backward).
 */
static ssize_t
find_3_5_compute(ssize_t i, size_t nbits, bool bit, bool forward) {
	ssize_t step = forward ? 1 : -1;
	while (i >= 0 && i < (ssize_t)nbits) {
		bool expected_bit = (i % 3 == 0) || (i % 5 == 0);
		if (expected_bit == bit) {
			return i;
		}
		i += step;
	}
	return forward ? (ssize_t)nbits : (ssize_t)-1;
}
76
77static void
78do_test_search_simple(size_t nbits) {
79 size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
80 fb_group_t *fb = malloc(sz);
81 fb_init(fb, nbits);
82
83 /* We pick multiples of 3 or 5. */
84 for (size_t i = 0; i < nbits; i++) {
85 if (i % 3 == 0) {
86 fb_set(fb, nbits, i);
87 }
88 /* This tests double-setting a little, too. */
89 if (i % 5 == 0) {
90 fb_set(fb, nbits, i);
91 }
92 }
93 for (size_t i = 0; i < nbits; i++) {
94 size_t ffs_compute = find_3_5_compute(i, nbits, true, true);
95 size_t ffs_search = fb_ffs(fb, nbits, i);
96 expect_zu_eq(ffs_compute, ffs_search, "ffs mismatch at %zu", i);
97
98 ssize_t fls_compute = find_3_5_compute(i, nbits, true, false);
99 size_t fls_search = fb_fls(fb, nbits, i);
100 expect_zu_eq(fls_compute, fls_search, "fls mismatch at %zu", i);
101
102 size_t ffu_compute = find_3_5_compute(i, nbits, false, true);
103 size_t ffu_search = fb_ffu(fb, nbits, i);
104 expect_zu_eq(ffu_compute, ffu_search, "ffu mismatch at %zu", i);
105
106 size_t flu_compute = find_3_5_compute(i, nbits, false, false);
107 size_t flu_search = fb_flu(fb, nbits, i);
108 expect_zu_eq(flu_compute, flu_search, "flu mismatch at %zu", i);
109 }
110
111 free(fb);
112}
113
/* Run do_test_search_simple across every bitmap size in NBITS_TAB. */
TEST_BEGIN(test_search_simple) {
#define NB(nbits) \
	do_test_search_simple(nbits);
	NBITS_TAB
#undef NB
}
TEST_END
121
/*
 * mostly_empty has exactly one set bit (special_bit); mostly_full has
 * exactly one unset bit (also special_bit).  From a query starting at
 * `position`, all eight first/last set/unset searches have answers that
 * can be written down directly; check them all.
 */
static void
expect_exhaustive_results(fb_group_t *mostly_full, fb_group_t *mostly_empty,
    size_t nbits, size_t special_bit, size_t position) {
	if (position < special_bit) {
		/* The special bit lies ahead of the query position. */
		expect_zu_eq(special_bit, fb_ffs(mostly_empty, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zd_eq(-1, fb_fls(mostly_empty, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zu_eq(position, fb_ffu(mostly_empty, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zd_eq(position, fb_flu(mostly_empty, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);

		expect_zu_eq(position, fb_ffs(mostly_full, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zd_eq(position, fb_fls(mostly_full, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zu_eq(special_bit, fb_ffu(mostly_full, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zd_eq(-1, fb_flu(mostly_full, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
	} else if (position == special_bit) {
		/* Querying exactly at the special bit. */
		expect_zu_eq(special_bit, fb_ffs(mostly_empty, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zd_eq(special_bit, fb_fls(mostly_empty, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zu_eq(position + 1, fb_ffu(mostly_empty, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zd_eq(position - 1, fb_flu(mostly_empty, nbits,
		    position), "mismatch at %zu, %zu", position, special_bit);

		expect_zu_eq(position + 1, fb_ffs(mostly_full, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zd_eq(position - 1, fb_fls(mostly_full, nbits,
		    position), "mismatch at %zu, %zu", position, special_bit);
		expect_zu_eq(position, fb_ffu(mostly_full, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zd_eq(position, fb_flu(mostly_full, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
	} else {
		/* position > special_bit. */
		expect_zu_eq(nbits, fb_ffs(mostly_empty, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zd_eq(special_bit, fb_fls(mostly_empty, nbits,
		    position), "mismatch at %zu, %zu", position, special_bit);
		expect_zu_eq(position, fb_ffu(mostly_empty, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zd_eq(position, fb_flu(mostly_empty, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);

		expect_zu_eq(position, fb_ffs(mostly_full, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zd_eq(position, fb_fls(mostly_full, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zu_eq(nbits, fb_ffu(mostly_full, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
		expect_zd_eq(special_bit, fb_flu(mostly_full, nbits, position),
		    "mismatch at %zu, %zu", position, special_bit);
	}
}
182
183static void
184do_test_search_exhaustive(size_t nbits) {
185 /* This test is quadratic; let's not get too big. */
186 if (nbits > 1000) {
187 return;
188 }
189 size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
190 fb_group_t *empty = malloc(sz);
191 fb_init(empty, nbits);
192 fb_group_t *full = malloc(sz);
193 fb_init(full, nbits);
194 fb_set_range(full, nbits, 0, nbits);
195
196 for (size_t i = 0; i < nbits; i++) {
197 fb_set(empty, nbits, i);
198 fb_unset(full, nbits, i);
199
200 for (size_t j = 0; j < nbits; j++) {
201 expect_exhaustive_results(full, empty, nbits, i, j);
202 }
203 fb_unset(empty, nbits, i);
204 fb_set(full, nbits, i);
205 }
206
207 free(empty);
208 free(full);
209}
210
/* Run do_test_search_exhaustive across every size in NBITS_TAB. */
TEST_BEGIN(test_search_exhaustive) {
#define NB(nbits) \
	do_test_search_exhaustive(nbits);
	NBITS_TAB
#undef NB
}
TEST_END
218
219TEST_BEGIN(test_range_simple) {
220 /*
221 * Just pick a constant big enough to have nontrivial middle sizes, and
222 * big enough that usages of things like weirdnum (below) near the
223 * beginning fit comfortably into the beginning of the bitmap.
224 */
225 size_t nbits = 64 * 10;
226 size_t ngroups = FB_NGROUPS(nbits);
227 fb_group_t *fb = malloc(sizeof(fb_group_t) * ngroups);
228 fb_init(fb, nbits);
229 for (size_t i = 0; i < nbits; i++) {
230 if (i % 2 == 0) {
231 fb_set_range(fb, nbits, i, 1);
232 }
233 }
234 for (size_t i = 0; i < nbits; i++) {
235 expect_b_eq(i % 2 == 0, fb_get(fb, nbits, i),
236 "mismatch at position %zu", i);
237 }
238 fb_set_range(fb, nbits, 0, nbits / 2);
239 fb_unset_range(fb, nbits, nbits / 2, nbits / 2);
240 for (size_t i = 0; i < nbits; i++) {
241 expect_b_eq(i < nbits / 2, fb_get(fb, nbits, i),
242 "mismatch at position %zu", i);
243 }
244
245 static const size_t weirdnum = 7;
246 fb_set_range(fb, nbits, 0, nbits);
247 fb_unset_range(fb, nbits, weirdnum, FB_GROUP_BITS + weirdnum);
248 for (size_t i = 0; i < nbits; i++) {
249 expect_b_eq(7 <= i && i <= 2 * weirdnum + FB_GROUP_BITS - 1,
250 !fb_get(fb, nbits, i), "mismatch at position %zu", i);
251 }
252 free(fb);
253}
254TEST_END
255
256static void
257do_test_empty_full_exhaustive(size_t nbits) {
258 size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
259 fb_group_t *empty = malloc(sz);
260 fb_init(empty, nbits);
261 fb_group_t *full = malloc(sz);
262 fb_init(full, nbits);
263 fb_set_range(full, nbits, 0, nbits);
264
265 expect_true(fb_full(full, nbits), "");
266 expect_false(fb_empty(full, nbits), "");
267 expect_false(fb_full(empty, nbits), "");
268 expect_true(fb_empty(empty, nbits), "");
269
270 for (size_t i = 0; i < nbits; i++) {
271 fb_set(empty, nbits, i);
272 fb_unset(full, nbits, i);
273
274 expect_false(fb_empty(empty, nbits), "error at bit %zu", i);
275 if (nbits != 1) {
276 expect_false(fb_full(empty, nbits),
277 "error at bit %zu", i);
278 expect_false(fb_empty(full, nbits),
279 "error at bit %zu", i);
280 } else {
281 expect_true(fb_full(empty, nbits),
282 "error at bit %zu", i);
283 expect_true(fb_empty(full, nbits),
284 "error at bit %zu", i);
285 }
286 expect_false(fb_full(full, nbits), "error at bit %zu", i);
287
288 fb_unset(empty, nbits, i);
289 fb_set(full, nbits, i);
290 }
291
292 free(empty);
293 free(full);
294}
295
/* Run do_test_empty_full_exhaustive across every size in NBITS_TAB. */
TEST_BEGIN(test_empty_full) {
#define NB(nbits) \
	do_test_empty_full_exhaustive(nbits);
	NBITS_TAB
#undef NB
}
TEST_END
303
304/*
305 * This tests both iter_range and the longest range functionality, which is
306 * built closely on top of it.
307 */
308TEST_BEGIN(test_iter_range_simple) {
309 size_t set_limit = 30;
310 size_t nbits = 100;
311 fb_group_t fb[FB_NGROUPS(100)];
312
313 fb_init(fb, nbits);
314
315 /*
316 * Failing to initialize these can lead to build failures with -Wall;
317 * the compiler can't prove that they're set.
318 */
319 size_t begin = (size_t)-1;
320 size_t len = (size_t)-1;
321 bool result;
322
323 /* A set of checks with only the first set_limit bits *set*. */
324 fb_set_range(fb, nbits, 0, set_limit);
325 expect_zu_eq(set_limit, fb_srange_longest(fb, nbits),
326 "Incorrect longest set range");
327 expect_zu_eq(nbits - set_limit, fb_urange_longest(fb, nbits),
328 "Incorrect longest unset range");
329 for (size_t i = 0; i < set_limit; i++) {
330 result = fb_srange_iter(fb, nbits, i, &begin, &len);
331 expect_true(result, "Should have found a range at %zu", i);
332 expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
333 expect_zu_eq(set_limit - i, len, "Incorrect len at %zu", i);
334
335 result = fb_urange_iter(fb, nbits, i, &begin, &len);
336 expect_true(result, "Should have found a range at %zu", i);
337 expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
338 expect_zu_eq(nbits - set_limit, len, "Incorrect len at %zu", i);
339
340 result = fb_srange_riter(fb, nbits, i, &begin, &len);
341 expect_true(result, "Should have found a range at %zu", i);
342 expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
343 expect_zu_eq(i + 1, len, "Incorrect len at %zu", i);
344
345 result = fb_urange_riter(fb, nbits, i, &begin, &len);
346 expect_false(result, "Should not have found a range at %zu", i);
347 }
348 for (size_t i = set_limit; i < nbits; i++) {
349 result = fb_srange_iter(fb, nbits, i, &begin, &len);
350 expect_false(result, "Should not have found a range at %zu", i);
351
352 result = fb_urange_iter(fb, nbits, i, &begin, &len);
353 expect_true(result, "Should have found a range at %zu", i);
354 expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
355 expect_zu_eq(nbits - i, len, "Incorrect len at %zu", i);
356
357 result = fb_srange_riter(fb, nbits, i, &begin, &len);
358 expect_true(result, "Should have found a range at %zu", i);
359 expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
360 expect_zu_eq(set_limit, len, "Incorrect len at %zu", i);
361
362 result = fb_urange_riter(fb, nbits, i, &begin, &len);
363 expect_true(result, "Should have found a range at %zu", i);
364 expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
365 expect_zu_eq(i - set_limit + 1, len, "Incorrect len at %zu", i);
366 }
367
368 /* A set of checks with only the first set_limit bits *unset*. */
369 fb_unset_range(fb, nbits, 0, set_limit);
370 fb_set_range(fb, nbits, set_limit, nbits - set_limit);
371 expect_zu_eq(nbits - set_limit, fb_srange_longest(fb, nbits),
372 "Incorrect longest set range");
373 expect_zu_eq(set_limit, fb_urange_longest(fb, nbits),
374 "Incorrect longest unset range");
375 for (size_t i = 0; i < set_limit; i++) {
376 result = fb_srange_iter(fb, nbits, i, &begin, &len);
377 expect_true(result, "Should have found a range at %zu", i);
378 expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
379 expect_zu_eq(nbits - set_limit, len, "Incorrect len at %zu", i);
380
381 result = fb_urange_iter(fb, nbits, i, &begin, &len);
382 expect_true(result, "Should have found a range at %zu", i);
383 expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
384 expect_zu_eq(set_limit - i, len, "Incorrect len at %zu", i);
385
386 result = fb_srange_riter(fb, nbits, i, &begin, &len);
387 expect_false(result, "Should not have found a range at %zu", i);
388
389 result = fb_urange_riter(fb, nbits, i, &begin, &len);
390 expect_true(result, "Should not have found a range at %zu", i);
391 expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
392 expect_zu_eq(i + 1, len, "Incorrect len at %zu", i);
393 }
394 for (size_t i = set_limit; i < nbits; i++) {
395 result = fb_srange_iter(fb, nbits, i, &begin, &len);
396 expect_true(result, "Should have found a range at %zu", i);
397 expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
398 expect_zu_eq(nbits - i, len, "Incorrect len at %zu", i);
399
400 result = fb_urange_iter(fb, nbits, i, &begin, &len);
401 expect_false(result, "Should not have found a range at %zu", i);
402
403 result = fb_srange_riter(fb, nbits, i, &begin, &len);
404 expect_true(result, "Should have found a range at %zu", i);
405 expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
406 expect_zu_eq(i - set_limit + 1, len, "Incorrect len at %zu", i);
407
408 result = fb_urange_riter(fb, nbits, i, &begin, &len);
409 expect_true(result, "Should have found a range at %zu", i);
410 expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
411 expect_zu_eq(set_limit, len, "Incorrect len at %zu", i);
412 }
413
414}
415TEST_END
416
417/*
418 * Doing this bit-by-bit is too slow for a real implementation, but for testing
419 * code, it's easy to get right. In the exhaustive tests, we'll compare the
420 * (fast but tricky) real implementation against the (slow but simple) testing
421 * one.
422 */
/*
 * Reference range iterator.  Starting at `start`, scan in the requested
 * direction for the first bit equal to `val`, then extend the run as far
 * as it continues.  On success, stores the run's lowest index in
 * *r_begin and its length in *r_len, and returns true; returns false if
 * no such bit exists in that direction.
 */
static bool
fb_iter_simple(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
    size_t *r_len, bool val, bool forward) {
	ssize_t stride = (forward ? (ssize_t)1 : (ssize_t)-1);
	ssize_t range_begin = (ssize_t)start;
	for (; range_begin != (ssize_t)nbits && range_begin != -1;
	    range_begin += stride) {
		if (fb_get(fb, nbits, range_begin) == val) {
			/* Found the run's first bit; see where it ends. */
			ssize_t range_end = range_begin;
			for (; range_end != (ssize_t)nbits && range_end != -1;
			    range_end += stride) {
				if (fb_get(fb, nbits, range_end) != val) {
					break;
				}
			}
			if (forward) {
				*r_begin = range_begin;
				*r_len = range_end - range_begin;
			} else {
				/*
				 * Backward scan: range_end stepped one past
				 * the run, so the run is
				 * [range_end + 1, range_begin].
				 */
				*r_begin = range_end + 1;
				*r_len = range_begin - range_end;
			}
			return true;
		}
	}
	return false;
}
450
451/* Similar, but for finding longest ranges. */
452static size_t
453fb_range_longest_simple(fb_group_t *fb, size_t nbits, bool val) {
454 size_t longest_so_far = 0;
455 for (size_t begin = 0; begin < nbits; begin++) {
456 if (fb_get(fb, nbits, begin) != val) {
457 continue;
458 }
459 size_t end = begin + 1;
460 for (; end < nbits; end++) {
461 if (fb_get(fb, nbits, end) != val) {
462 break;
463 }
464 }
465 if (end - begin > longest_so_far) {
466 longest_so_far = end - begin;
467 }
468 }
469 return longest_so_far;
470}
471
472static void
473expect_iter_results_at(fb_group_t *fb, size_t nbits, size_t pos,
474 bool val, bool forward) {
475 bool iter_res;
476 size_t iter_begin JEMALLOC_CC_SILENCE_INIT(0);
477 size_t iter_len JEMALLOC_CC_SILENCE_INIT(0);
478 if (val) {
479 if (forward) {
480 iter_res = fb_srange_iter(fb, nbits, pos,
481 &iter_begin, &iter_len);
482 } else {
483 iter_res = fb_srange_riter(fb, nbits, pos,
484 &iter_begin, &iter_len);
485 }
486 } else {
487 if (forward) {
488 iter_res = fb_urange_iter(fb, nbits, pos,
489 &iter_begin, &iter_len);
490 } else {
491 iter_res = fb_urange_riter(fb, nbits, pos,
492 &iter_begin, &iter_len);
493 }
494 }
495
496 bool simple_iter_res;
497 /*
498 * These are dead stores, but the compiler can't always figure that out
499 * statically, and warns on the uninitialized variable.
500 */
501 size_t simple_iter_begin = 0;
502 size_t simple_iter_len = 0;
503 simple_iter_res = fb_iter_simple(fb, nbits, pos, &simple_iter_begin,
504 &simple_iter_len, val, forward);
505
506 expect_b_eq(iter_res, simple_iter_res, "Result mismatch at %zu", pos);
507 if (iter_res && simple_iter_res) {
508 assert_zu_eq(iter_begin, simple_iter_begin,
509 "Begin mismatch at %zu", pos);
510 expect_zu_eq(iter_len, simple_iter_len,
511 "Length mismatch at %zu", pos);
512 }
513}
514
515static void
516expect_iter_results(fb_group_t *fb, size_t nbits) {
517 for (size_t i = 0; i < nbits; i++) {
518 expect_iter_results_at(fb, nbits, i, false, false);
519 expect_iter_results_at(fb, nbits, i, false, true);
520 expect_iter_results_at(fb, nbits, i, true, false);
521 expect_iter_results_at(fb, nbits, i, true, true);
522 }
523 expect_zu_eq(fb_range_longest_simple(fb, nbits, true),
524 fb_srange_longest(fb, nbits), "Longest range mismatch");
525 expect_zu_eq(fb_range_longest_simple(fb, nbits, false),
526 fb_urange_longest(fb, nbits), "Longest range mismatch");
527}
528
529static void
530set_pattern_3(fb_group_t *fb, size_t nbits, bool zero_val) {
531 for (size_t i = 0; i < nbits; i++) {
532 if ((i % 6 < 3 && zero_val) || (i % 6 >= 3 && !zero_val)) {
533 fb_set(fb, nbits, i);
534 } else {
535 fb_unset(fb, nbits, i);
536 }
537 }
538}
539
540static void
541do_test_iter_range_exhaustive(size_t nbits) {
542 /* This test is also pretty slow. */
543 if (nbits > 1000) {
544 return;
545 }
546 size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
547 fb_group_t *fb = malloc(sz);
548 fb_init(fb, nbits);
549
550 set_pattern_3(fb, nbits, /* zero_val */ true);
551 expect_iter_results(fb, nbits);
552
553 set_pattern_3(fb, nbits, /* zero_val */ false);
554 expect_iter_results(fb, nbits);
555
556 fb_set_range(fb, nbits, 0, nbits);
557 fb_unset_range(fb, nbits, 0, nbits / 2 == 0 ? 1 : nbits / 2);
558 expect_iter_results(fb, nbits);
559
560 fb_unset_range(fb, nbits, 0, nbits);
561 fb_set_range(fb, nbits, 0, nbits / 2 == 0 ? 1: nbits / 2);
562 expect_iter_results(fb, nbits);
563
564 free(fb);
565}
566
567/*
568 * Like test_iter_range_simple, this tests both iteration and longest-range
569 * computation.
570 */
/* Run do_test_iter_range_exhaustive across every size in NBITS_TAB. */
TEST_BEGIN(test_iter_range_exhaustive) {
#define NB(nbits) \
	do_test_iter_range_exhaustive(nbits);
	NBITS_TAB
#undef NB
}
TEST_END
578
579/*
580 * If all set bits in the bitmap are contiguous, in [set_start, set_end),
581 * returns the number of set bits in [scount_start, scount_end).
582 */
static size_t
scount_contiguous(size_t set_start, size_t set_end, size_t scount_start,
    size_t scount_end) {
	/*
	 * The set bits lie exactly in [set_start, set_end), so the set-bit
	 * count over [scount_start, scount_end) is just the size of the
	 * intersection of the two intervals.  Computing that directly
	 * replaces the previous five-way case analysis (and its
	 * unreachable() fallback), and handles empty ranges for free.
	 */
	size_t isect_begin = set_start > scount_start ? set_start
	    : scount_start;
	size_t isect_end = set_end < scount_end ? set_end : scount_end;
	return isect_end > isect_begin ? isect_end - isect_begin : 0;
}
612
/*
 * If all set bits in the bitmap are contiguous, in [set_start, set_end),
 * returns the number of *unset* bits in [ucount_start, ucount_end).
 */
static size_t
ucount_contiguous(size_t set_start, size_t set_end, size_t ucount_start,
    size_t ucount_end) {
	/*
	 * Unset count = window size minus the number of set bits in the
	 * window, i.e. minus the size of the window's intersection with
	 * [set_start, set_end).  As with scount_contiguous, the direct
	 * computation replaces the old five-way case ladder and its
	 * unreachable() fallback.
	 */
	size_t isect_begin = set_start > ucount_start ? set_start
	    : ucount_start;
	size_t isect_end = set_end < ucount_end ? set_end : ucount_end;
	size_t overlap = isect_end > isect_begin ? isect_end - isect_begin
	    : 0;
	return (ucount_end - ucount_start) - overlap;
}
642
643static void
644expect_count_match_contiguous(fb_group_t *fb, size_t nbits, size_t set_start,
645 size_t set_end) {
646 for (size_t i = 0; i < nbits; i++) {
647 for (size_t j = i + 1; j <= nbits; j++) {
648 size_t cnt = j - i;
649 size_t scount_expected = scount_contiguous(set_start,
650 set_end, i, j);
651 size_t scount_computed = fb_scount(fb, nbits, i, cnt);
652 expect_zu_eq(scount_expected, scount_computed,
653 "fb_scount error with nbits=%zu, start=%zu, "
654 "cnt=%zu, with bits set in [%zu, %zu)",
655 nbits, i, cnt, set_start, set_end);
656
657 size_t ucount_expected = ucount_contiguous(set_start,
658 set_end, i, j);
659 size_t ucount_computed = fb_ucount(fb, nbits, i, cnt);
660 assert_zu_eq(ucount_expected, ucount_computed,
661 "fb_ucount error with nbits=%zu, start=%zu, "
662 "cnt=%zu, with bits set in [%zu, %zu)",
663 nbits, i, cnt, set_start, set_end);
664
665 }
666 }
667}
668
669static void
670do_test_count_contiguous(size_t nbits) {
671 size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
672 fb_group_t *fb = malloc(sz);
673
674 fb_init(fb, nbits);
675
676 expect_count_match_contiguous(fb, nbits, 0, 0);
677 for (size_t i = 0; i < nbits; i++) {
678 fb_set(fb, nbits, i);
679 expect_count_match_contiguous(fb, nbits, 0, i + 1);
680 }
681
682 for (size_t i = 0; i < nbits; i++) {
683 fb_unset(fb, nbits, i);
684 expect_count_match_contiguous(fb, nbits, i + 1, nbits);
685 }
686
687 free(fb);
688}
689
/*
 * Spot-check fb_scount/fb_ucount on a single contiguous set range that
 * is progressively widened (each range includes the previous one, so
 * fb_set_range alone keeps the bitmap in the expected state).
 */
TEST_BEGIN(test_count_contiguous_simple) {
	enum {nbits = 300};
	fb_group_t fb[FB_NGROUPS(nbits)];
	fb_init(fb, nbits);
	/* Just an arbitrary number. */
	size_t start = 23;

	fb_set_range(fb, nbits, start, 30 - start);
	expect_count_match_contiguous(fb, nbits, start, 30);

	fb_set_range(fb, nbits, start, 40 - start);
	expect_count_match_contiguous(fb, nbits, start, 40);

	fb_set_range(fb, nbits, start, 70 - start);
	expect_count_match_contiguous(fb, nbits, start, 70);

	fb_set_range(fb, nbits, start, 120 - start);
	expect_count_match_contiguous(fb, nbits, start, 120);

	fb_set_range(fb, nbits, start, 150 - start);
	expect_count_match_contiguous(fb, nbits, start, 150);

	fb_set_range(fb, nbits, start, 200 - start);
	expect_count_match_contiguous(fb, nbits, start, 200);

	fb_set_range(fb, nbits, start, 290 - start);
	expect_count_match_contiguous(fb, nbits, start, 290);
}
TEST_END
719
/* Run do_test_count_contiguous across the smaller NBITS_TAB sizes. */
TEST_BEGIN(test_count_contiguous) {
#define NB(nbits) \
	/* This test is *particularly* slow in debug builds. */ \
	if ((!config_debug && nbits < 300) || nbits < 150) { \
		do_test_count_contiguous(nbits); \
	}
	NBITS_TAB
#undef NB
}
TEST_END
730
731static void
732expect_count_match_alternating(fb_group_t *fb_even, fb_group_t *fb_odd,
733 size_t nbits) {
734 for (size_t i = 0; i < nbits; i++) {
735 for (size_t j = i + 1; j <= nbits; j++) {
736 size_t cnt = j - i;
737 size_t odd_scount = cnt / 2
738 + (size_t)(cnt % 2 == 1 && i % 2 == 1);
739 size_t odd_scount_computed = fb_scount(fb_odd, nbits,
740 i, j - i);
741 assert_zu_eq(odd_scount, odd_scount_computed,
742 "fb_scount error with nbits=%zu, start=%zu, "
743 "cnt=%zu, with alternating bits set.",
744 nbits, i, j - i);
745
746 size_t odd_ucount = cnt / 2
747 + (size_t)(cnt % 2 == 1 && i % 2 == 0);
748 size_t odd_ucount_computed = fb_ucount(fb_odd, nbits,
749 i, j - i);
750 assert_zu_eq(odd_ucount, odd_ucount_computed,
751 "fb_ucount error with nbits=%zu, start=%zu, "
752 "cnt=%zu, with alternating bits set.",
753 nbits, i, j - i);
754
755 size_t even_scount = cnt / 2
756 + (size_t)(cnt % 2 == 1 && i % 2 == 0);
757 size_t even_scount_computed = fb_scount(fb_even, nbits,
758 i, j - i);
759 assert_zu_eq(even_scount, even_scount_computed,
760 "fb_scount error with nbits=%zu, start=%zu, "
761 "cnt=%zu, with alternating bits set.",
762 nbits, i, j - i);
763
764 size_t even_ucount = cnt / 2
765 + (size_t)(cnt % 2 == 1 && i % 2 == 1);
766 size_t even_ucount_computed = fb_ucount(fb_even, nbits,
767 i, j - i);
768 assert_zu_eq(even_ucount, even_ucount_computed,
769 "fb_ucount error with nbits=%zu, start=%zu, "
770 "cnt=%zu, with alternating bits set.",
771 nbits, i, j - i);
772 }
773 }
774}
775
776static void
777do_test_count_alternating(size_t nbits) {
778 if (nbits > 1000) {
779 return;
780 }
781 size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
782 fb_group_t *fb_even = malloc(sz);
783 fb_group_t *fb_odd = malloc(sz);
784
785 fb_init(fb_even, nbits);
786 fb_init(fb_odd, nbits);
787
788 for (size_t i = 0; i < nbits; i++) {
789 if (i % 2 == 0) {
790 fb_set(fb_even, nbits, i);
791 } else {
792 fb_set(fb_odd, nbits, i);
793 }
794 }
795
796 expect_count_match_alternating(fb_even, fb_odd, nbits);
797
798 free(fb_even);
799 free(fb_odd);
800}
801
/* Run do_test_count_alternating across every size in NBITS_TAB. */
TEST_BEGIN(test_count_alternating) {
#define NB(nbits) \
	do_test_count_alternating(nbits);
	NBITS_TAB
#undef NB
}
TEST_END
809
/*
 * Shared driver for the bitwise-op tests: build two pseudo-random
 * bitmaps, apply fb_op to them, and check every result bit against the
 * bit-level reference `op`.  The PRNG streams are deterministic, so they
 * can be replayed from the same seeds for verification.
 */
static void
do_test_bit_op(size_t nbits, bool (*op)(bool a, bool b),
    void (*fb_op)(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits)) {
	size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
	fb_group_t *fb1 = malloc(sz);
	fb_group_t *fb2 = malloc(sz);
	fb_group_t *fb_result = malloc(sz);
	fb_init(fb1, nbits);
	fb_init(fb2, nbits);
	fb_init(fb_result, nbits);

	/* Just two random numbers. */
	const uint64_t prng_init1 = (uint64_t)0X4E9A9DE6A35691CDULL;
	const uint64_t prng_init2 = (uint64_t)0X7856E396B063C36EULL;

	uint64_t prng1 = prng_init1;
	uint64_t prng2 = prng_init2;

	for (size_t i = 0; i < nbits; i++) {
		bool bit1 = ((prng1 & (1ULL << (i % 64))) != 0);
		bool bit2 = ((prng2 & (1ULL << (i % 64))) != 0);

		if (bit1) {
			fb_set(fb1, nbits, i);
		}
		if (bit2) {
			fb_set(fb2, nbits, i);
		}

		/* Advance the streams on the same schedule as the replay. */
		if (i % 64 == 0) {
			prng1 = prng_state_next_u64(prng1);
			prng2 = prng_state_next_u64(prng2);
		}
	}

	fb_op(fb_result, fb1, fb2, nbits);

	/* Reset the prngs to replay them. */
	prng1 = prng_init1;
	prng2 = prng_init2;

	for (size_t i = 0; i < nbits; i++) {
		bool bit1 = ((prng1 & (1ULL << (i % 64))) != 0);
		bool bit2 = ((prng2 & (1ULL << (i % 64))) != 0);

		/* Original bitmaps shouldn't change. */
		expect_b_eq(bit1, fb_get(fb1, nbits, i), "difference at bit %zu", i);
		expect_b_eq(bit2, fb_get(fb2, nbits, i), "difference at bit %zu", i);

		/* Result should match the reference op, bit by bit. */
		expect_b_eq(op(bit1, bit2), fb_get(fb_result, nbits, i),
		    "difference at bit %zu", i);

		/* Update the same way we did last time. */
		if (i % 64 == 0) {
			prng1 = prng_state_next_u64(prng1);
			prng2 = prng_state_next_u64(prng2);
		}
	}

	free(fb1);
	free(fb2);
	free(fb_result);
}
874
/* Reference per-bit op for fb_bit_and. */
static bool
binary_and(bool a, bool b) {
	return a && b;
}
879
880static void
881do_test_bit_and(size_t nbits) {
882 do_test_bit_op(nbits, &binary_and, &fb_bit_and);
883}
884
/* Run do_test_bit_and across every size in NBITS_TAB. */
TEST_BEGIN(test_bit_and) {
#define NB(nbits) \
	do_test_bit_and(nbits);
	NBITS_TAB
#undef NB
}
TEST_END
892
/* Reference per-bit op for fb_bit_or. */
static bool
binary_or(bool a, bool b) {
	return a || b;
}
897
898static void
899do_test_bit_or(size_t nbits) {
900 do_test_bit_op(nbits, &binary_or, &fb_bit_or);
901}
902
/* Run do_test_bit_or across every size in NBITS_TAB. */
TEST_BEGIN(test_bit_or) {
#define NB(nbits) \
	do_test_bit_or(nbits);
	NBITS_TAB
#undef NB
}
TEST_END
910
/* Reference per-bit op for fb_bit_not; the second operand is ignored. */
static bool
binary_not(bool a, bool b) {
	(void)b;
	return a == false;
}
916
917static void
918fb_bit_not_shim(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2,
919 size_t nbits) {
920 (void)src2;
921 fb_bit_not(dst, src1, nbits);
922}
923
924static void
925do_test_bit_not(size_t nbits) {
926 do_test_bit_op(nbits, &binary_not, &fb_bit_not_shim);
927}
928
/* Run do_test_bit_not across every size in NBITS_TAB. */
TEST_BEGIN(test_bit_not) {
#define NB(nbits) \
	do_test_bit_not(nbits);
	NBITS_TAB
#undef NB
}
TEST_END
936
int
main(void) {
	/*
	 * NOTE(review): test_no_reentrancy presumably skips the harness's
	 * reentrant-allocation variants -- confirm against the test
	 * framework; these tests manage their own malloc/free.
	 */
	return test_no_reentrancy(
	    test_fb_init,
	    test_get_set_unset,
	    test_search_simple,
	    test_search_exhaustive,
	    test_range_simple,
	    test_empty_full,
	    test_iter_range_simple,
	    test_iter_range_exhaustive,
	    test_count_contiguous_simple,
	    test_count_contiguous,
	    test_count_alternating,
	    test_bit_and,
	    test_bit_or,
	    test_bit_not);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/fork.c b/examples/redis-unstable/deps/jemalloc/test/unit/fork.c
deleted file mode 100644
index 4137423..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/fork.c
+++ /dev/null
@@ -1,141 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#ifndef _WIN32
4#include <sys/wait.h>
5#endif
6
#ifndef _WIN32
/*
 * Block until the child identified by pid terminates, failing the test
 * if waitpid() errors, the child dies from a signal, or it exits with a
 * nonzero status.  The loop retries until a termination status
 * (WIFSIGNALED or WIFEXITED) is reported.
 */
static void
wait_for_child_exit(int pid) {
	int status;
	while (true) {
		if (waitpid(pid, &status, 0) == -1) {
			test_fail("Unexpected waitpid() failure.");
		}
		if (WIFSIGNALED(status)) {
			test_fail("Unexpected child termination due to "
			    "signal %d", WTERMSIG(status));
			break;
		}
		if (WIFEXITED(status)) {
			if (WEXITSTATUS(status) != 0) {
				test_fail("Unexpected child exit value %d",
				    WEXITSTATUS(status));
			}
			break;
		}
	}
}
#endif
30
/*
 * Allocate before and after a fork() and check that both parent and
 * child can keep using the allocator afterwards.
 */
TEST_BEGIN(test_fork) {
#ifndef _WIN32
	void *p;
	pid_t pid;

	/* Set up a manually managed arena for test. */
	unsigned arena_ind;
	size_t sz = sizeof(unsigned);
	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
	    0, "Unexpected mallctl() failure");

	/* Migrate to the new arena. */
	unsigned old_arena_ind;
	sz = sizeof(old_arena_ind);
	expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
	    (void *)&arena_ind, sizeof(arena_ind)), 0,
	    "Unexpected mallctl() failure");

	p = malloc(1);
	expect_ptr_not_null(p, "Unexpected malloc() failure");

	pid = fork();

	/* Everything from here runs in both the parent and the child. */
	free(p);

	p = malloc(64);
	expect_ptr_not_null(p, "Unexpected malloc() failure");
	free(p);

	if (pid == -1) {
		/* Error. */
		test_fail("Unexpected fork() failure");
	} else if (pid == 0) {
		/* Child. */
		_exit(0);
	} else {
		wait_for_child_exit(pid);
	}
#else
	test_skip("fork(2) is irrelevant to Windows");
#endif
}
TEST_END
74
#ifndef _WIN32
/*
 * Thread body: touch the allocator, then fork+exec a trivial program and
 * reap it.  `arg` is unused (thread-entry signature requirement).
 */
static void *
do_fork_thd(void *arg) {
	/* Result deliberately ignored; we only want allocator activity. */
	malloc(1);
	int pid = fork();
	if (pid == -1) {
		/* Error. */
		test_fail("Unexpected fork() failure");
	} else if (pid == 0) {
		/* Child: replace ourselves with a trivially succeeding program. */
		char *args[] = {"true", NULL};
		execvp(args[0], args);
		/* Only reached if execvp itself failed. */
		test_fail("Exec failed");
	} else {
		/* Parent */
		wait_for_child_exit(pid);
	}
	return NULL;
}
#endif
95
96#ifndef _WIN32
97static void
98do_test_fork_multithreaded() {
99 thd_t child;
100 thd_create(&child, do_fork_thd, NULL);
101 do_fork_thd(NULL);
102 thd_join(child, NULL);
103}
104#endif
105
/* Repeatedly run the multithreaded fork scenario in fresh processes. */
TEST_BEGIN(test_fork_multithreaded) {
#ifndef _WIN32
	/*
	 * We've seen bugs involving hanging on arenas_lock (though the same
	 * class of bugs can happen on any mutex). The bugs are intermittent
	 * though, so we want to run the test multiple times. Since we hold the
	 * arenas lock only early in the process lifetime, we can't just run
	 * this test in a loop (since, after all the arenas are initialized, we
	 * won't acquire arenas_lock any further). We therefore repeat the test
	 * with multiple processes.
	 */
	for (int i = 0; i < 100; i++) {
		int pid = fork();
		if (pid == -1) {
			/* Error. */
			test_fail("Unexpected fork() failure,");
		} else if (pid == 0) {
			/* Child. */
			do_test_fork_multithreaded();
			_exit(0);
		} else {
			wait_for_child_exit(pid);
		}
	}
#else
	test_skip("fork(2) is irrelevant to Windows");
#endif
}
TEST_END
135
int
main(void) {
	/* Fork tests manage their own allocation; skip reentrancy variants. */
	return test_no_reentrancy(
	    test_fork,
	    test_fork_multithreaded);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/fxp.c b/examples/redis-unstable/deps/jemalloc/test/unit/fxp.c
deleted file mode 100644
index 27f1097..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/fxp.c
+++ /dev/null
@@ -1,394 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/fxp.h"
4
5static double
6fxp2double(fxp_t a) {
7 double intpart = (double)(a >> 16);
8 double fracpart = (double)(a & ((1U << 16) - 1)) / (1U << 16);
9 return intpart + fracpart;
10}
11
/*
 * Is a close to b?
 *
 * Our implementation doesn't try for precision.  Correspondingly, don't
 * enforce it too strenuously here; accept values that are close in either
 * absolute or relative terms.  The relative check divides by fabs(a) (the
 * reference value): dividing by a signed `a`, as the original did, made the
 * quotient negative for any negative reference and thus accepted arbitrary
 * pairs.  a == 0 is handled by the absolute check short-circuiting.
 */
static bool
double_close(double a, double b) {
	double diff = fabs(a - b);
	return diff < 0.01 || diff / fabs(a) < 0.01;
}
22
23static bool
24fxp_close(fxp_t a, fxp_t b) {
25 return double_close(fxp2double(a), fxp2double(b));
26}
27
28static fxp_t
29xparse_fxp(const char *str) {
30 fxp_t result;
31 bool err = fxp_parse(&result, str, NULL);
32 assert_false(err, "Invalid fxp string: %s", str);
33 return result;
34}
35
36static void
37expect_parse_accurate(const char *str, const char *parse_str) {
38 double true_val = strtod(str, NULL);
39 fxp_t fxp_val;
40 char *end;
41 bool err = fxp_parse(&fxp_val, parse_str, &end);
42 expect_false(err, "Unexpected parse failure");
43 expect_ptr_eq(parse_str + strlen(str), end,
44 "Didn't parse whole string");
45 expect_true(double_close(fxp2double(fxp_val), true_val),
46 "Misparsed %s", str);
47}
48
/* Check that str parses correctly, alone and with trailing junk. */
static void
parse_valid_trial(const char *str) {
	static const char *formats[] = {
		"%swith_some_trailing_text",
		"%s with a space",
		"%s,in_a_malloc_conf_string:1",
	};

	/* The value it parses should be correct. */
	expect_parse_accurate(str, str);
	for (size_t i = 0; i < sizeof(formats) / sizeof(formats[0]); i++) {
		char buf[100];
		snprintf(buf, sizeof(buf), formats[i], str);
		expect_parse_accurate(str, buf);
	}
}
61
/* Strings fxp_parse() must accept, grouped by shape. */
TEST_BEGIN(test_parse_valid) {
	/* Pure integers, including leading zeros. */
	parse_valid_trial("0");
	parse_valid_trial("1");
	parse_valid_trial("2");
	parse_valid_trial("100");
	parse_valid_trial("345");
	parse_valid_trial("00000000123");
	parse_valid_trial("00000000987");

	/* Fractional digits beyond the representable precision. */
	parse_valid_trial("0.0");
	parse_valid_trial("0.00000000000456456456");
	parse_valid_trial("100.00000000000456456456");

	/* Progressively smaller fractional parts. */
	parse_valid_trial("123.1");
	parse_valid_trial("123.01");
	parse_valid_trial("123.001");
	parse_valid_trial("123.0001");
	parse_valid_trial("123.00001");
	parse_valid_trial("123.000001");
	parse_valid_trial("123.0000001");

	/* No integer part at all. */
	parse_valid_trial(".0");
	parse_valid_trial(".1");
	parse_valid_trial(".01");
	parse_valid_trial(".001");
	parse_valid_trial(".0001");
	parse_valid_trial(".00001");
	parse_valid_trial(".000001");

	/* Trailing zeros in the fractional part. */
	parse_valid_trial(".1");
	parse_valid_trial(".10");
	parse_valid_trial(".100");
	parse_valid_trial(".1000");
	parse_valid_trial(".100000");
}
TEST_END
98
/*
 * Verify that fxp_parse() rejects str and, on failure, leaves both the
 * result (seeded with a sentinel) and the end pointer untouched.
 */
static void
expect_parse_failure(const char *str) {
	fxp_t result = FXP_INIT_INT(333);
	char *end = (void *)0x123;
	bool err = fxp_parse(&result, str, &end);
	expect_true(err, "Expected a parse error on: %s", str);
	expect_ptr_eq((void *)0x123, end,
	    "Parse error shouldn't change results");
	expect_u32_eq(result, FXP_INIT_INT(333),
	    "Parse error shouldn't change results");
}
110
/* Strings fxp_parse() must reject. */
TEST_BEGIN(test_parse_invalid) {
	/* Trailing dot with no fractional digits. */
	expect_parse_failure("123.");
	/* Non-digit characters. */
	expect_parse_failure("3.a");
	expect_parse_failure(".a");
	expect_parse_failure("a.1");
	expect_parse_failure("a");
	/* A valid string, but one that overflows. */
	expect_parse_failure("123456789");
	expect_parse_failure("0000000123456789");
	expect_parse_failure("1000000");
}
TEST_END
123
124static void
125expect_init_percent(unsigned percent, const char *str) {
126 fxp_t result_init = FXP_INIT_PERCENT(percent);
127 fxp_t result_parse = xparse_fxp(str);
128 expect_u32_eq(result_init, result_parse,
129 "Expect representations of FXP_INIT_PERCENT(%u) and "
130 "fxp_parse(\"%s\") to be equal; got %x and %x",
131 percent, str, result_init, result_parse);
132
133}
134
/*
 * Every other test uses either parsing or FXP_INIT_INT; it gets tested in those
 * ways. We need a one-off for the percent-based initialization, though.
 */
TEST_BEGIN(test_init_percent) {
	expect_init_percent(100, "1");
	expect_init_percent(75, ".75");
	expect_init_percent(1, ".01");
	expect_init_percent(50, ".5");
}
TEST_END
146
147static void
148expect_add(const char *astr, const char *bstr, const char* resultstr) {
149 fxp_t a = xparse_fxp(astr);
150 fxp_t b = xparse_fxp(bstr);
151 fxp_t result = xparse_fxp(resultstr);
152 expect_true(fxp_close(fxp_add(a, b), result),
153 "Expected %s + %s == %s", astr, bstr, resultstr);
154}
155
/* Spot-check fxp_add() on hand-computed cases. */
TEST_BEGIN(test_add_simple) {
	expect_add("0", "0", "0");
	expect_add("0", "1", "1");
	expect_add("1", "1", "2");
	expect_add("1.5", "1.5", "3");
	expect_add("0.1", "0.1", "0.2");
	expect_add("123", "456", "579");
}
TEST_END
165
166static void
167expect_sub(const char *astr, const char *bstr, const char* resultstr) {
168 fxp_t a = xparse_fxp(astr);
169 fxp_t b = xparse_fxp(bstr);
170 fxp_t result = xparse_fxp(resultstr);
171 expect_true(fxp_close(fxp_sub(a, b), result),
172 "Expected %s - %s == %s", astr, bstr, resultstr);
173}
174
/* Spot-check fxp_sub() on hand-computed cases. */
TEST_BEGIN(test_sub_simple) {
	expect_sub("0", "0", "0");
	expect_sub("1", "0", "1");
	expect_sub("1", "1", "0");
	expect_sub("3.5", "1.5", "2");
	expect_sub("0.3", "0.1", "0.2");
	expect_sub("456", "123", "333");
}
TEST_END
184
185static void
186expect_mul(const char *astr, const char *bstr, const char* resultstr) {
187 fxp_t a = xparse_fxp(astr);
188 fxp_t b = xparse_fxp(bstr);
189 fxp_t result = xparse_fxp(resultstr);
190 expect_true(fxp_close(fxp_mul(a, b), result),
191 "Expected %s * %s == %s", astr, bstr, resultstr);
192}
193
/* Spot-check fxp_mul() on hand-computed cases. */
TEST_BEGIN(test_mul_simple) {
	expect_mul("0", "0", "0");
	expect_mul("1", "0", "0");
	expect_mul("1", "1", "1");
	expect_mul("1.5", "1.5", "2.25");
	expect_mul("100.0", "10", "1000");
	expect_mul(".1", "10", "1");
}
TEST_END
203
204static void
205expect_div(const char *astr, const char *bstr, const char* resultstr) {
206 fxp_t a = xparse_fxp(astr);
207 fxp_t b = xparse_fxp(bstr);
208 fxp_t result = xparse_fxp(resultstr);
209 expect_true(fxp_close(fxp_div(a, b), result),
210 "Expected %s / %s == %s", astr, bstr, resultstr);
211}
212
/* Spot-check fxp_div() on hand-computed cases. */
TEST_BEGIN(test_div_simple) {
	expect_div("1", "1", "1");
	expect_div("0", "1", "0");
	expect_div("2", "1", "2");
	expect_div("3", "2", "1.5");
	expect_div("3", "1.5", "2");
	expect_div("10", ".1", "100");
	expect_div("123", "456", ".2697368421");
}
TEST_END
223
224static void
225expect_round(const char *str, uint32_t rounded_down, uint32_t rounded_nearest) {
226 fxp_t fxp = xparse_fxp(str);
227 uint32_t fxp_rounded_down = fxp_round_down(fxp);
228 uint32_t fxp_rounded_nearest = fxp_round_nearest(fxp);
229 expect_u32_eq(rounded_down, fxp_rounded_down,
230 "Mistake rounding %s down", str);
231 expect_u32_eq(rounded_nearest, fxp_rounded_nearest,
232 "Mistake rounding %s to nearest", str);
233}
234
/* Rounding at, below, and above the .5 boundary. */
TEST_BEGIN(test_round_simple) {
	expect_round("1.5", 1, 2);
	expect_round("0", 0, 0);
	expect_round("0.1", 0, 0);
	expect_round("0.4", 0, 0);
	expect_round("0.40000", 0, 0);
	/* Exactly .5 rounds up in nearest mode. */
	expect_round("0.5", 0, 1);
	expect_round("0.6", 0, 1);
	expect_round("123", 123, 123);
	expect_round("123.4", 123, 123);
	expect_round("123.5", 123, 124);
}
TEST_END
248
/* Expect fxp_mul_frac(a, parse(fracstr)) to be approximately expected. */
static void
expect_mul_frac(size_t a, const char *fracstr, size_t expected) {
	size_t got = fxp_mul_frac(a, xparse_fxp(fracstr));
	expect_true(double_close(expected, got),
	    "Expected %zu * %s == %zu (fracmul); got %zu", a, fracstr,
	    expected, got);
}
257
/* size_t-by-fraction products, including values near SIZE_MAX. */
TEST_BEGIN(test_mul_frac_simple) {
	expect_mul_frac(SIZE_MAX, "1.0", SIZE_MAX);
	expect_mul_frac(SIZE_MAX, ".75", SIZE_MAX / 4 * 3);
	expect_mul_frac(SIZE_MAX, ".5", SIZE_MAX / 2);
	expect_mul_frac(SIZE_MAX, ".25", SIZE_MAX / 4);
	expect_mul_frac(1U << 16, "1.0", 1U << 16);
	expect_mul_frac(1U << 30, "0.5", 1U << 29);
	expect_mul_frac(1U << 30, "0.25", 1U << 28);
	expect_mul_frac(1U << 30, "0.125", 1U << 27);
	expect_mul_frac((1U << 30) + 1, "0.125", 1U << 27);
	expect_mul_frac(100, "0.25", 25);
	expect_mul_frac(1000 * 1000, "0.001", 1000);
}
TEST_END
272
273static void
274expect_print(const char *str) {
275 fxp_t fxp = xparse_fxp(str);
276 char buf[FXP_BUF_SIZE];
277 fxp_print(fxp, buf);
278 expect_d_eq(0, strcmp(str, buf), "Couldn't round-trip print %s", str);
279}
280
/* Values whose decimal expansion is exact in binary round-trip exactly. */
TEST_BEGIN(test_print_simple) {
	expect_print("0.0");
	expect_print("1.0");
	expect_print("2.0");
	expect_print("123.0");
	/*
	 * We hit the possibility of roundoff errors whenever the fractional
	 * component isn't a round binary number; only check these here (we
	 * round-trip properly in the stress test).
	 */
	expect_print("1.5");
	expect_print("3.375");
	expect_print("0.25");
	expect_print("0.125");
	/* 1 / 2**14 */
	expect_print("0.00006103515625");
}
TEST_END
299
/*
 * Cross-check every fxp operation against double arithmetic on all pairs
 * drawn from a fixed table of representative values.
 */
TEST_BEGIN(test_stress) {
	const char *numbers[] = {
		"0.0", "0.1", "0.2", "0.3", "0.4",
		"0.5", "0.6", "0.7", "0.8", "0.9",

		"1.0", "1.1", "1.2", "1.3", "1.4",
		"1.5", "1.6", "1.7", "1.8", "1.9",

		"2.0", "2.1", "2.2", "2.3", "2.4",
		"2.5", "2.6", "2.7", "2.8", "2.9",

		"17.0", "17.1", "17.2", "17.3", "17.4",
		"17.5", "17.6", "17.7", "17.8", "17.9",

		"18.0", "18.1", "18.2", "18.3", "18.4",
		"18.5", "18.6", "18.7", "18.8", "18.9",

		"123.0", "123.1", "123.2", "123.3", "123.4",
		"123.5", "123.6", "123.7", "123.8", "123.9",

		"124.0", "124.1", "124.2", "124.3", "124.4",
		"124.5", "124.6", "124.7", "124.8", "124.9",

		"125.0", "125.1", "125.2", "125.3", "125.4",
		"125.5", "125.6", "125.7", "125.8", "125.9"};
	size_t numbers_len = sizeof(numbers)/sizeof(numbers[0]);
	for (size_t i = 0; i < numbers_len; i++) {
		fxp_t fxp_a = xparse_fxp(numbers[i]);
		double double_a = strtod(numbers[i], NULL);

		/* Unary ops: both rounding modes. */
		uint32_t fxp_rounded_down = fxp_round_down(fxp_a);
		uint32_t fxp_rounded_nearest = fxp_round_nearest(fxp_a);
		uint32_t double_rounded_down = (uint32_t)double_a;
		uint32_t double_rounded_nearest = (uint32_t)round(double_a);

		expect_u32_eq(double_rounded_down, fxp_rounded_down,
		    "Incorrectly rounded down %s", numbers[i]);
		expect_u32_eq(double_rounded_nearest, fxp_rounded_nearest,
		    "Incorrectly rounded-to-nearest %s", numbers[i]);

		/* Binary ops against every other table entry. */
		for (size_t j = 0; j < numbers_len; j++) {
			fxp_t fxp_b = xparse_fxp(numbers[j]);
			double double_b = strtod(numbers[j], NULL);

			fxp_t fxp_sum = fxp_add(fxp_a, fxp_b);
			double double_sum = double_a + double_b;
			expect_true(
			    double_close(fxp2double(fxp_sum), double_sum),
			    "Miscomputed %s + %s", numbers[i], numbers[j]);

			/* fxp is unsigned; only subtract when a > b. */
			if (double_a > double_b) {
				fxp_t fxp_diff = fxp_sub(fxp_a, fxp_b);
				double double_diff = double_a - double_b;
				expect_true(
				    double_close(fxp2double(fxp_diff),
				    double_diff),
				    "Miscomputed %s - %s", numbers[i],
				    numbers[j]);
			}

			fxp_t fxp_prod = fxp_mul(fxp_a, fxp_b);
			double double_prod = double_a * double_b;
			expect_true(
			    double_close(fxp2double(fxp_prod), double_prod),
			    "Miscomputed %s * %s", numbers[i], numbers[j]);

			/* Skip division by zero. */
			if (double_b != 0.0) {
				fxp_t fxp_quot = fxp_div(fxp_a, fxp_b);
				double double_quot = double_a / double_b;
				expect_true(
				    double_close(fxp2double(fxp_quot),
				    double_quot),
				    "Miscomputed %s / %s", numbers[i],
				    numbers[j]);
			}
		}
	}
}
TEST_END
379
int
main(void) {
	/* Entry point for the fxp unit tests. */
	return test_no_reentrancy(
	    test_parse_valid,
	    test_parse_invalid,
	    test_init_percent,
	    test_add_simple,
	    test_sub_simple,
	    test_mul_simple,
	    test_div_simple,
	    test_round_simple,
	    test_mul_frac_simple,
	    test_print_simple,
	    test_stress);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/hash.c b/examples/redis-unstable/deps/jemalloc/test/unit/hash.c
deleted file mode 100644
index 49f0823..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/hash.c
+++ /dev/null
@@ -1,173 +0,0 @@
1/*
2 * This file is based on code that is part of SMHasher
3 * (https://code.google.com/p/smhasher/), and is subject to the MIT license
4 * (http://www.opensource.org/licenses/mit-license.php). Both email addresses
5 * associated with the source code's revision history belong to Austin Appleby,
6 * and the revision history ranges from 2010 to 2012. Therefore the copyright
7 * and license are here taken to be:
8 *
9 * Copyright (c) 2010-2012 Austin Appleby
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a copy
12 * of this software and associated documentation files (the "Software"), to deal
13 * in the Software without restriction, including without limitation the rights
14 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 * copies of the Software, and to permit persons to whom the Software is
16 * furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice shall be included in
19 * all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 * THE SOFTWARE.
28 */
29
30#include "test/jemalloc_test.h"
31#include "jemalloc/internal/hash.h"
32
/* The three MurmurHash3 variants exposed by jemalloc's hash.h. */
typedef enum {
	hash_variant_x86_32,
	hash_variant_x86_128,
	hash_variant_x64_128
} hash_variant_t;
38
39static int
40hash_variant_bits(hash_variant_t variant) {
41 switch (variant) {
42 case hash_variant_x86_32: return 32;
43 case hash_variant_x86_128: return 128;
44 case hash_variant_x64_128: return 128;
45 default: not_reached();
46 }
47}
48
49static const char *
50hash_variant_string(hash_variant_t variant) {
51 switch (variant) {
52 case hash_variant_x86_32: return "hash_x86_32";
53 case hash_variant_x86_128: return "hash_x86_128";
54 case hash_variant_x64_128: return "hash_x64_128";
55 default: not_reached();
56 }
57}
58
#define KEY_SIZE 256
/*
 * SMHasher-style verification: hash keys {0}, {0,1}, ..., {0..255} (with
 * seed 256 - len), concatenate the outputs, hash that buffer with seed 0,
 * and compare the low 32 bits of the result against a known-good constant.
 */
static void
hash_variant_verify_key(hash_variant_t variant, uint8_t *key) {
	const int hashbytes = hash_variant_bits(variant) / 8;
	const int hashes_size = hashbytes * 256;
	VARIABLE_ARRAY(uint8_t, hashes, hashes_size);
	VARIABLE_ARRAY(uint8_t, final, hashbytes);
	unsigned i;
	uint32_t computed, expected;

	memset(key, 0, KEY_SIZE);
	memset(hashes, 0, hashes_size);
	memset(final, 0, hashbytes);

	/*
	 * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the
	 * seed.
	 */
	for (i = 0; i < 256; i++) {
		key[i] = (uint8_t)i;
		switch (variant) {
		case hash_variant_x86_32: {
			uint32_t out;
			out = hash_x86_32(key, i, 256-i);
			memcpy(&hashes[i*hashbytes], &out, hashbytes);
			break;
		} case hash_variant_x86_128: {
			uint64_t out[2];
			hash_x86_128(key, i, 256-i, out);
			memcpy(&hashes[i*hashbytes], out, hashbytes);
			break;
		} case hash_variant_x64_128: {
			uint64_t out[2];
			hash_x64_128(key, i, 256-i, out);
			memcpy(&hashes[i*hashbytes], out, hashbytes);
			break;
		} default: not_reached();
		}
	}

	/* Hash the result array. */
	switch (variant) {
	case hash_variant_x86_32: {
		uint32_t out = hash_x86_32(hashes, hashes_size, 0);
		memcpy(final, &out, sizeof(out));
		break;
	} case hash_variant_x86_128: {
		uint64_t out[2];
		hash_x86_128(hashes, hashes_size, 0, out);
		memcpy(final, out, sizeof(out));
		break;
	} case hash_variant_x64_128: {
		uint64_t out[2];
		hash_x64_128(hashes, hashes_size, 0, out);
		memcpy(final, out, sizeof(out));
		break;
	} default: not_reached();
	}

	/* Assemble the low 32 bits little-endian regardless of host order. */
	computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) |
	    (final[3] << 24);

	switch (variant) {
#ifdef JEMALLOC_BIG_ENDIAN
	case hash_variant_x86_32: expected = 0x6213303eU; break;
	case hash_variant_x86_128: expected = 0x266820caU; break;
	case hash_variant_x64_128: expected = 0xcc622b6fU; break;
#else
	case hash_variant_x86_32: expected = 0xb0f57ee3U; break;
	case hash_variant_x86_128: expected = 0xb3ece62aU; break;
	case hash_variant_x64_128: expected = 0x6384ba69U; break;
#endif
	default: not_reached();
	}

	expect_u32_eq(computed, expected,
	    "Hash mismatch for %s(): expected %#x but got %#x",
	    hash_variant_string(variant), expected, computed);
}
138
/*
 * Run the verification at every possible misalignment of the key buffer
 * (offsets 0 .. MAX_ALIGN-1), since the hashes must handle unaligned input.
 */
static void
hash_variant_verify(hash_variant_t variant) {
#define MAX_ALIGN 16
	uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)];
	unsigned i;

	for (i = 0; i < MAX_ALIGN; i++) {
		hash_variant_verify_key(variant, &key[i]);
	}
#undef MAX_ALIGN
}
#undef KEY_SIZE
151
/* One test per hash variant. */
TEST_BEGIN(test_hash_x86_32) {
	hash_variant_verify(hash_variant_x86_32);
}
TEST_END

TEST_BEGIN(test_hash_x86_128) {
	hash_variant_verify(hash_variant_x86_128);
}
TEST_END

TEST_BEGIN(test_hash_x64_128) {
	hash_variant_verify(hash_variant_x64_128);
}
TEST_END
166
int
main(void) {
	/* Entry point for the hash unit tests. */
	return test(
	    test_hash_x86_32,
	    test_hash_x86_128,
	    test_hash_x64_128);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/hook.c b/examples/redis-unstable/deps/jemalloc/test/unit/hook.c
deleted file mode 100644
index 16a6f1b..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/hook.c
+++ /dev/null
@@ -1,586 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/hook.h"
4
/* Globals into which the test hooks record their most recent arguments. */
static void *arg_extra;
static int arg_type;
static void *arg_result;
static void *arg_address;
static size_t arg_old_usize;
static size_t arg_new_usize;
static uintptr_t arg_result_raw;
static uintptr_t arg_args_raw[4];

/* Number of hook invocations since the last reset(). */
static int call_count = 0;
15
/*
 * Restore the captured-argument globals to sentinel values (12345 and
 * bytes of 77 can never be produced by a real hook invocation).
 */
static void
reset_args() {
	arg_extra = NULL;
	arg_type = 12345;
	arg_result = NULL;
	arg_address = NULL;
	arg_old_usize = 0;
	arg_new_usize = 0;
	arg_result_raw = 0;
	memset(arg_args_raw, 77, sizeof(arg_args_raw));
}
27
28static void
29alloc_free_size(size_t sz) {
30 void *ptr = mallocx(1, 0);
31 free(ptr);
32 ptr = mallocx(1, 0);
33 free(ptr);
34 ptr = mallocx(1, MALLOCX_TCACHE_NONE);
35 dallocx(ptr, MALLOCX_TCACHE_NONE);
36}
37
/*
 * We want to support a degree of user reentrancy. This tests a variety of
 * allocation scenarios.
 */
static void
be_reentrant() {
	/* Let's make sure the tcache is non-empty if enabled. */
	alloc_free_size(1);
	alloc_free_size(1024);
	alloc_free_size(64 * 1024);
	alloc_free_size(256 * 1024);
	alloc_free_size(1024 * 1024);

	/* Some reallocation. */
	void *ptr = mallocx(129, 0);
	ptr = rallocx(ptr, 130, 0);
	free(ptr);

	/* Large allocations, growing and shrinking across classes. */
	ptr = mallocx(2 * 1024 * 1024, 0);
	free(ptr);
	ptr = mallocx(1 * 1024 * 1024, 0);
	ptr = rallocx(ptr, 2 * 1024 * 1024, 0);
	free(ptr);

	/* Small-to-large growth. */
	ptr = mallocx(1, 0);
	ptr = rallocx(ptr, 1000, 0);
	free(ptr);
}
66
/* Capture the first nargs raw argument words into the global array. */
static void
set_args_raw(uintptr_t *args_raw, int nargs) {
	memcpy(arg_args_raw, args_raw, sizeof(uintptr_t) * nargs);
}
71
72static void
73expect_args_raw(uintptr_t *args_raw_expected, int nargs) {
74 int cmp = memcmp(args_raw_expected, arg_args_raw,
75 sizeof(uintptr_t) * nargs);
76 expect_d_eq(cmp, 0, "Raw args mismatch");
77}
78
79static void
80reset() {
81 call_count = 0;
82 reset_args();
83}
84
/* Alloc-hook callback: record every argument, then allocate reentrantly. */
static void
test_alloc_hook(void *extra, hook_alloc_t type, void *result,
    uintptr_t result_raw, uintptr_t args_raw[3]) {
	call_count++;
	arg_extra = extra;
	arg_type = (int)type;
	arg_result = result;
	arg_result_raw = result_raw;
	set_args_raw(args_raw, 3);
	be_reentrant();
}
96
/* Dalloc-hook callback: record every argument, then allocate reentrantly. */
static void
test_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
    uintptr_t args_raw[3]) {
	call_count++;
	arg_extra = extra;
	arg_type = (int)type;
	arg_address = address;
	set_args_raw(args_raw, 3);
	be_reentrant();
}
107
/* Expand-hook callback: record every argument, then allocate reentrantly. */
static void
test_expand_hook(void *extra, hook_expand_t type, void *address,
    size_t old_usize, size_t new_usize, uintptr_t result_raw,
    uintptr_t args_raw[4]) {
	call_count++;
	arg_extra = extra;
	arg_type = (int)type;
	arg_address = address;
	arg_old_usize = old_usize;
	arg_new_usize = new_usize;
	arg_result_raw = result_raw;
	set_args_raw(args_raw, 4);
	be_reentrant();
}
122
TEST_BEGIN(test_hooks_basic) {
	/* Just verify that the hooks record their arguments correctly. */
	hooks_t hooks = {
		&test_alloc_hook, &test_dalloc_hook, &test_expand_hook,
		(void *)111};
	void *handle = hook_install(TSDN_NULL, &hooks);
	uintptr_t args_raw[4] = {10, 20, 30, 40};

	/* Alloc */
	reset_args();
	hook_invoke_alloc(hook_alloc_posix_memalign, (void *)222, 333,
	    args_raw);
	expect_ptr_eq(arg_extra, (void *)111, "Passed wrong user pointer");
	expect_d_eq((int)hook_alloc_posix_memalign, arg_type,
	    "Passed wrong alloc type");
	expect_ptr_eq((void *)222, arg_result, "Passed wrong result address");
	expect_u64_eq(333, arg_result_raw, "Passed wrong result");
	expect_args_raw(args_raw, 3);

	/* Dalloc */
	reset_args();
	hook_invoke_dalloc(hook_dalloc_sdallocx, (void *)222, args_raw);
	expect_d_eq((int)hook_dalloc_sdallocx, arg_type,
	    "Passed wrong dalloc type");
	expect_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
	expect_ptr_eq((void *)222, arg_address, "Passed wrong address");
	expect_args_raw(args_raw, 3);

	/* Expand */
	reset_args();
	hook_invoke_expand(hook_expand_xallocx, (void *)222, 333, 444, 555,
	    args_raw);
	expect_d_eq((int)hook_expand_xallocx, arg_type,
	    "Passed wrong expand type");
	expect_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
	expect_ptr_eq((void *)222, arg_address, "Passed wrong address");
	expect_zu_eq(333, arg_old_usize, "Passed wrong old usize");
	expect_zu_eq(444, arg_new_usize, "Passed wrong new usize");
	expect_zu_eq(555, arg_result_raw, "Passed wrong result");
	expect_args_raw(args_raw, 4);

	hook_remove(TSDN_NULL, handle);
}
TEST_END
167
TEST_BEGIN(test_hooks_null) {
	/* Null hooks should be ignored, not crash. */
	hooks_t hooks1 = {NULL, NULL, NULL, NULL};
	hooks_t hooks2 = {&test_alloc_hook, NULL, NULL, NULL};
	hooks_t hooks3 = {NULL, &test_dalloc_hook, NULL, NULL};
	hooks_t hooks4 = {NULL, NULL, &test_expand_hook, NULL};

	void *handle1 = hook_install(TSDN_NULL, &hooks1);
	void *handle2 = hook_install(TSDN_NULL, &hooks2);
	void *handle3 = hook_install(TSDN_NULL, &hooks3);
	void *handle4 = hook_install(TSDN_NULL, &hooks4);

	expect_ptr_ne(handle1, NULL, "Hook installation failed");
	expect_ptr_ne(handle2, NULL, "Hook installation failed");
	expect_ptr_ne(handle3, NULL, "Hook installation failed");
	expect_ptr_ne(handle4, NULL, "Hook installation failed");

	uintptr_t args_raw[4] = {10, 20, 30, 40};

	/* Each event type has exactly one non-NULL hook installed. */
	call_count = 0;
	hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
	expect_d_eq(call_count, 1, "Called wrong number of times");

	call_count = 0;
	hook_invoke_dalloc(hook_dalloc_free, NULL, args_raw);
	expect_d_eq(call_count, 1, "Called wrong number of times");

	call_count = 0;
	hook_invoke_expand(hook_expand_realloc, NULL, 0, 0, 0, args_raw);
	expect_d_eq(call_count, 1, "Called wrong number of times");

	hook_remove(TSDN_NULL, handle1);
	hook_remove(TSDN_NULL, handle2);
	hook_remove(TSDN_NULL, handle3);
	hook_remove(TSDN_NULL, handle4);
}
TEST_END
205
/* A removed hook must never be invoked again. */
TEST_BEGIN(test_hooks_remove) {
	hooks_t hooks = {&test_alloc_hook, NULL, NULL, NULL};
	void *handle = hook_install(TSDN_NULL, &hooks);
	expect_ptr_ne(handle, NULL, "Hook installation failed");
	call_count = 0;
	uintptr_t args_raw[4] = {10, 20, 30, 40};
	hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
	expect_d_eq(call_count, 1, "Hook not invoked");

	call_count = 0;
	hook_remove(TSDN_NULL, handle);
	hook_invoke_alloc(hook_alloc_malloc, NULL, 0, NULL);
	expect_d_eq(call_count, 0, "Hook invoked after removal");

}
TEST_END
222
/* Exercise the alloc hook through every public allocation entry point. */
TEST_BEGIN(test_hooks_alloc_simple) {
	/* "Simple" in the sense that we're not in a realloc variant. */
	hooks_t hooks = {&test_alloc_hook, NULL, NULL, (void *)123};
	void *handle = hook_install(TSDN_NULL, &hooks);
	expect_ptr_ne(handle, NULL, "Hook installation failed");

	/* Stop malloc from being optimized away. */
	volatile int err;
	void *volatile ptr;

	/* malloc */
	reset();
	ptr = malloc(1);
	expect_d_eq(call_count, 1, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, (int)hook_alloc_malloc, "Wrong hook type");
	expect_ptr_eq(ptr, arg_result, "Wrong result");
	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
	    "Wrong raw result");
	expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
	free(ptr);

	/* posix_memalign */
	reset();
	err = posix_memalign((void **)&ptr, 1024, 1);
	expect_d_eq(call_count, 1, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, (int)hook_alloc_posix_memalign,
	    "Wrong hook type");
	expect_ptr_eq(ptr, arg_result, "Wrong result");
	expect_u64_eq((uintptr_t)err, (uintptr_t)arg_result_raw,
	    "Wrong raw result");
	expect_u64_eq((uintptr_t)&ptr, arg_args_raw[0], "Wrong argument");
	expect_u64_eq((uintptr_t)1024, arg_args_raw[1], "Wrong argument");
	expect_u64_eq((uintptr_t)1, arg_args_raw[2], "Wrong argument");
	free(ptr);

	/* aligned_alloc */
	reset();
	ptr = aligned_alloc(1024, 1);
	expect_d_eq(call_count, 1, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, (int)hook_alloc_aligned_alloc,
	    "Wrong hook type");
	expect_ptr_eq(ptr, arg_result, "Wrong result");
	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
	    "Wrong raw result");
	expect_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
	expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
	free(ptr);

	/* calloc */
	reset();
	ptr = calloc(11, 13);
	expect_d_eq(call_count, 1, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, (int)hook_alloc_calloc, "Wrong hook type");
	expect_ptr_eq(ptr, arg_result, "Wrong result");
	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
	    "Wrong raw result");
	expect_u64_eq((uintptr_t)11, arg_args_raw[0], "Wrong argument");
	expect_u64_eq((uintptr_t)13, arg_args_raw[1], "Wrong argument");
	free(ptr);

	/* memalign (only when jemalloc overrides it on this platform) */
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
	reset();
	ptr = memalign(1024, 1);
	expect_d_eq(call_count, 1, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, (int)hook_alloc_memalign, "Wrong hook type");
	expect_ptr_eq(ptr, arg_result, "Wrong result");
	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
	    "Wrong raw result");
	expect_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
	expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
	free(ptr);
#endif /* JEMALLOC_OVERRIDE_MEMALIGN */

	/* valloc (only when jemalloc overrides it on this platform) */
#ifdef JEMALLOC_OVERRIDE_VALLOC
	reset();
	ptr = valloc(1);
	expect_d_eq(call_count, 1, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, (int)hook_alloc_valloc, "Wrong hook type");
	expect_ptr_eq(ptr, arg_result, "Wrong result");
	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
	    "Wrong raw result");
	expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
	free(ptr);
#endif /* JEMALLOC_OVERRIDE_VALLOC */

	/* mallocx */
	reset();
	ptr = mallocx(1, MALLOCX_LG_ALIGN(10));
	expect_d_eq(call_count, 1, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, (int)hook_alloc_mallocx, "Wrong hook type");
	expect_ptr_eq(ptr, arg_result, "Wrong result");
	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
	    "Wrong raw result");
	expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
	expect_u64_eq((uintptr_t)MALLOCX_LG_ALIGN(10), arg_args_raw[1],
	    "Wrong flags");
	free(ptr);

	hook_remove(TSDN_NULL, handle);
}
TEST_END
333
/* Exercise the dalloc hook through every public deallocation entry point. */
TEST_BEGIN(test_hooks_dalloc_simple) {
	/* "Simple" in the sense that we're not in a realloc variant. */
	hooks_t hooks = {NULL, &test_dalloc_hook, NULL, (void *)123};
	void *handle = hook_install(TSDN_NULL, &hooks);
	expect_ptr_ne(handle, NULL, "Hook installation failed");

	void *volatile ptr;

	/* free() */
	reset();
	ptr = malloc(1);
	free(ptr);
	expect_d_eq(call_count, 1, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, (int)hook_dalloc_free, "Wrong hook type");
	expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
	expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");

	/* dallocx() */
	reset();
	ptr = malloc(1);
	dallocx(ptr, MALLOCX_TCACHE_NONE);
	expect_d_eq(call_count, 1, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type");
	expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
	expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
	expect_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1],
	    "Wrong raw arg");

	/* sdallocx() */
	reset();
	ptr = malloc(1);
	sdallocx(ptr, 1, MALLOCX_TCACHE_NONE);
	expect_d_eq(call_count, 1, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, (int)hook_dalloc_sdallocx, "Wrong hook type");
	expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
	expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
	expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg");
	expect_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2],
	    "Wrong raw arg");

	hook_remove(TSDN_NULL, handle);
}
TEST_END
380
/* Exercise the expand hook via xallocx(). */
TEST_BEGIN(test_hooks_expand_simple) {
	/* "Simple" in the sense that we're not in a realloc variant. */
	hooks_t hooks = {NULL, NULL, &test_expand_hook, (void *)123};
	void *handle = hook_install(TSDN_NULL, &hooks);
	expect_ptr_ne(handle, NULL, "Hook installation failed");

	void *volatile ptr;

	/* xallocx() */
	reset();
	ptr = malloc(1);
	size_t new_usize = xallocx(ptr, 100, 200, MALLOCX_TCACHE_NONE);
	expect_d_eq(call_count, 1, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, (int)hook_expand_xallocx, "Wrong hook type");
	expect_ptr_eq(ptr, arg_address, "Wrong pointer expanded");
	expect_u64_eq(arg_old_usize, nallocx(1, 0), "Wrong old usize");
	expect_u64_eq(arg_new_usize, sallocx(ptr, 0), "Wrong new usize");
	expect_u64_eq(new_usize, arg_result_raw, "Wrong result");
	expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong arg");
	expect_u64_eq(100, arg_args_raw[1], "Wrong arg");
	expect_u64_eq(200, arg_args_raw[2], "Wrong arg");
	expect_u64_eq(MALLOCX_TCACHE_NONE, arg_args_raw[3], "Wrong arg");

	hook_remove(TSDN_NULL, handle);
}
TEST_END
408
/*
 * realloc() degenerates to malloc (NULL pointer) or free (zero size,
 * depending on opt_zero_realloc_action); the corresponding hook must fire.
 */
TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) {
	hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
	    &test_expand_hook, (void *)123};
	void *handle = hook_install(TSDN_NULL, &hooks);
	expect_ptr_ne(handle, NULL, "Hook installation failed");

	void *volatile ptr;

	/* realloc(NULL, size) as malloc */
	reset();
	ptr = realloc(NULL, 1);
	expect_d_eq(call_count, 1, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
	expect_ptr_eq(ptr, arg_result, "Wrong result");
	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
	    "Wrong raw result");
	expect_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
	expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
	free(ptr);

	/* realloc(ptr, 0) as free */
	if (opt_zero_realloc_action == zero_realloc_action_free) {
		ptr = malloc(1);
		reset();
		realloc(ptr, 0);
		expect_d_eq(call_count, 1, "Hook not called");
		expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
		expect_d_eq(arg_type, (int)hook_dalloc_realloc,
		    "Wrong hook type");
		expect_ptr_eq(ptr, arg_address,
		    "Wrong pointer freed");
		expect_u64_eq((uintptr_t)ptr, arg_args_raw[0],
		    "Wrong raw arg");
		expect_u64_eq((uintptr_t)0, arg_args_raw[1],
		    "Wrong raw arg");
	}

	/* realloc(NULL, 0) as malloc(0) */
	reset();
	ptr = realloc(NULL, 0);
	expect_d_eq(call_count, 1, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
	expect_ptr_eq(ptr, arg_result, "Wrong result");
	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
	    "Wrong raw result");
	expect_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
	expect_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong argument");
	free(ptr);

	hook_remove(TSDN_NULL, handle);
}
TEST_END
463
/*
 * Shared driver for realloc-style hook tests.
 *
 * ralloc: the reallocation entry point (realloc_wrapper() or rallocx()).
 * flags: MALLOCX flags forwarded to ralloc (ignored by realloc_wrapper()).
 * expand_type: hook type expected for an in-place (expanding) realloc.
 * dalloc_type: hook type expected for the free half of a moving realloc.
 *
 * A moving realloc fires two hooks (alloc + dalloc), an in-place one fires a
 * single expand hook — hence the call_count checks of 1 vs. 2 below.
 */
static void
do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags,
    int expand_type, int dalloc_type) {
	hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
	    &test_expand_hook, (void *)123};
	void *handle = hook_install(TSDN_NULL, &hooks);
	expect_ptr_ne(handle, NULL, "Hook installation failed");

	void *volatile ptr;
	void *volatile ptr2;

	/* Realloc in-place, small: 129 -> 130 stays in the same size class. */
	ptr = malloc(129);
	reset();
	ptr2 = ralloc(ptr, 130, flags);
	expect_ptr_eq(ptr, ptr2, "Small realloc moved");

	expect_d_eq(call_count, 1, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, expand_type, "Wrong hook type");
	expect_ptr_eq(ptr, arg_address, "Wrong address");
	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
	    "Wrong raw result");
	expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
	expect_u64_eq((uintptr_t)130, arg_args_raw[1], "Wrong argument");
	free(ptr);

	/*
	 * Realloc in-place, large. Since we can't guarantee the large case
	 * across all platforms, we stay resilient to moving results.
	 * (The alloc/free of 2MB first makes room that the 1MB allocation can
	 * later expand into, encouraging the in-place path.)
	 */
	ptr = malloc(2 * 1024 * 1024);
	free(ptr);
	ptr2 = malloc(1 * 1024 * 1024);
	reset();
	ptr = ralloc(ptr2, 2 * 1024 * 1024, flags);
	/* ptr is the new address, ptr2 is the old address. */
	if (ptr == ptr2) {
		expect_d_eq(call_count, 1, "Hook not called");
		expect_d_eq(arg_type, expand_type, "Wrong hook type");
	} else {
		expect_d_eq(call_count, 2, "Wrong hooks called");
		expect_ptr_eq(ptr, arg_result, "Wrong address");
		expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
	}
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_ptr_eq(ptr2, arg_address, "Wrong address");
	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
	    "Wrong raw result");
	expect_u64_eq((uintptr_t)ptr2, arg_args_raw[0], "Wrong argument");
	expect_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
	    "Wrong argument");
	free(ptr);

	/* Realloc with move, small: 8 -> 128 crosses size classes. */
	ptr = malloc(8);
	reset();
	ptr2 = ralloc(ptr, 128, flags);
	expect_ptr_ne(ptr, ptr2, "Small realloc didn't move");

	expect_d_eq(call_count, 2, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
	expect_ptr_eq(ptr, arg_address, "Wrong address");
	expect_ptr_eq(ptr2, arg_result, "Wrong address");
	expect_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
	    "Wrong raw result");
	expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
	expect_u64_eq((uintptr_t)128, arg_args_raw[1], "Wrong argument");
	free(ptr2);

	/* Realloc with move, large. */
	ptr = malloc(1);
	reset();
	ptr2 = ralloc(ptr, 2 * 1024 * 1024, flags);
	expect_ptr_ne(ptr, ptr2, "Large realloc didn't move");

	expect_d_eq(call_count, 2, "Hook not called");
	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
	expect_ptr_eq(ptr, arg_address, "Wrong address");
	expect_ptr_eq(ptr2, arg_result, "Wrong address");
	expect_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
	    "Wrong raw result");
	expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
	expect_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
	    "Wrong argument");
	free(ptr2);

	hook_remove(TSDN_NULL, handle);
}
555
556static void *
557realloc_wrapper(void *ptr, size_t size, UNUSED int flags) {
558 return realloc(ptr, size);
559}
560
/* Runs the shared realloc driver against libc-style realloc(). */
TEST_BEGIN(test_hooks_realloc) {
	do_realloc_test(&realloc_wrapper, 0, hook_expand_realloc,
	    hook_dalloc_realloc);
}
TEST_END
566
/* Runs the shared realloc driver against rallocx(), bypassing the tcache. */
TEST_BEGIN(test_hooks_rallocx) {
	do_realloc_test(&rallocx, MALLOCX_TCACHE_NONE, hook_expand_rallocx,
	    hook_dalloc_rallocx);
}
TEST_END
572
int
main(void) {
	/*
	 * We assert on call counts; reentrant allocator activity during the
	 * tests would perturb those counts, hence test_no_reentrancy.
	 */
	return test_no_reentrancy(
	    test_hooks_basic,
	    test_hooks_null,
	    test_hooks_remove,
	    test_hooks_alloc_simple,
	    test_hooks_dalloc_simple,
	    test_hooks_expand_simple,
	    test_hooks_realloc_as_malloc_or_free,
	    test_hooks_realloc,
	    test_hooks_rallocx);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/hpa.c b/examples/redis-unstable/deps/jemalloc/test/unit/hpa.c
deleted file mode 100644
index dfd57f3..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/hpa.c
+++ /dev/null
@@ -1,459 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/hpa.h"
4#include "jemalloc/internal/nstime.h"
5
6#define SHARD_IND 111
7
8#define ALLOC_MAX (HUGEPAGE / 4)
9
/* Bundles everything a standalone HPA shard needs, for test setup/teardown. */
typedef struct test_data_s test_data_t;
struct test_data_s {
	/*
	 * Must be the first member -- we convert back and forth between the
	 * test_data_t and the hpa_shard_t;
	 */
	hpa_shard_t shard;
	/* Central allocator the shard draws hugepages from. */
	hpa_central_t central;
	/* Metadata allocator backing all of the members below. */
	base_t *base;
	edata_cache_t shard_edata_cache;

	emap_t emap;
};
23
/*
 * Baseline shard options used by every test in this file; test_defer_time
 * copies and tweaks deferral_allowed.
 */
static hpa_shard_opts_t test_hpa_shard_opts_default = {
	/* slab_max_alloc */
	ALLOC_MAX,
	/* hugification threshold */
	HUGEPAGE,
	/* dirty_mult */
	FXP_INIT_PERCENT(25),
	/* deferral_allowed */
	false,
	/* hugify_delay_ms */
	10 * 1000,
};
36
/*
 * Builds a fully-initialized HPA shard (base, edata cache, emap, central,
 * shard) and returns it.  The returned pointer is really a test_data_t whose
 * first member is the shard; destroy_test_data() converts back to free it.
 * Aborts via assert on any initialization failure.
 */
static hpa_shard_t *
create_test_data(hpa_hooks_t *hooks, hpa_shard_opts_t *opts) {
	bool err;
	base_t *base = base_new(TSDN_NULL, /* ind */ SHARD_IND,
	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
	assert_ptr_not_null(base, "");

	test_data_t *test_data = malloc(sizeof(test_data_t));
	assert_ptr_not_null(test_data, "");

	test_data->base = base;

	err = edata_cache_init(&test_data->shard_edata_cache, base);
	assert_false(err, "");

	err = emap_init(&test_data->emap, test_data->base, /* zeroed */ false);
	assert_false(err, "");

	err = hpa_central_init(&test_data->central, test_data->base, hooks);
	assert_false(err, "");

	err = hpa_shard_init(&test_data->shard, &test_data->central,
	    &test_data->emap, test_data->base, &test_data->shard_edata_cache,
	    SHARD_IND, opts);
	assert_false(err, "");

	return (hpa_shard_t *)test_data;
}
65
/*
 * Tears down a shard created by create_test_data().  Deleting the base
 * releases all the metadata that was carved out of it; then the container
 * struct itself is freed.
 */
static void
destroy_test_data(hpa_shard_t *shard) {
	test_data_t *test_data = (test_data_t *)shard;
	base_delete(TSDN_NULL, test_data->base);
	free(test_data);
}
72
/*
 * The shard's slab_max_alloc is ALLOC_MAX: an allocation of exactly that size
 * must succeed, and one page more must be refused.
 */
TEST_BEGIN(test_alloc_max) {
	test_skip_if(!hpa_supported());

	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
	    &test_hpa_shard_opts_default);
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());

	edata_t *edata;

	/* Small max */
	bool deferred_work_generated = false;
	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
	    false, &deferred_work_generated);
	expect_ptr_not_null(edata, "Allocation of small max failed");
	/* The first edata is intentionally not freed; teardown reclaims it. */
	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false,
	    false, false, &deferred_work_generated);
	expect_ptr_null(edata, "Allocation of larger than small max succeeded");

	destroy_test_data(shard);
}
TEST_END
94
/*
 * Stress-test bookkeeping written INTO each allocation's own memory: if the
 * allocator ever hands out overlapping regions, the corruption shows up as a
 * node_check() failure.
 */
typedef struct mem_contents_s mem_contents_t;
struct mem_contents_s {
	/* Address of this struct itself; must survive round-trips intact. */
	uintptr_t my_addr;
	/* Size of the containing edata. */
	size_t size;
	/* Back-pointer to the edata that owns this memory. */
	edata_t *my_edata;
	rb_node(mem_contents_t) link;
};
102
103static int
104mem_contents_cmp(const mem_contents_t *a, const mem_contents_t *b) {
105 return (a->my_addr > b->my_addr) - (a->my_addr < b->my_addr);
106}
107
/* Generates the static red-black tree implementation over mem_contents_t. */
typedef rb_tree(mem_contents_t) mem_tree_t;
rb_gen(static, mem_tree_, mem_tree_t, mem_contents_t, link,
    mem_contents_cmp);
111
/* Asserts that region a ends at or before region b begins (no overlap). */
static void
node_assert_ordered(mem_contents_t *a, mem_contents_t *b) {
	assert_zu_lt(a->my_addr, a->my_addr + a->size, "Overflow");
	assert_zu_le(a->my_addr + a->size, b->my_addr, "");
}
117
/*
 * Validates one node: its self-referential fields must still match the edata
 * metadata (detecting memory corruption / aliasing), and it must not overlap
 * its tree neighbors.
 */
static void
node_check(mem_tree_t *tree, mem_contents_t *contents) {
	edata_t *edata = contents->my_edata;
	assert_ptr_eq(contents, (void *)contents->my_addr, "");
	assert_ptr_eq(contents, edata_base_get(edata), "");
	assert_zu_eq(contents->size, edata_size_get(edata), "");
	assert_ptr_eq(contents->my_edata, edata, "");

	mem_contents_t *next = mem_tree_next(tree, contents);
	if (next != NULL) {
		node_assert_ordered(contents, next);
	}
	mem_contents_t *prev = mem_tree_prev(tree, contents);
	if (prev != NULL) {
		node_assert_ordered(prev, contents);
	}
}
135
136static void
137node_insert(mem_tree_t *tree, edata_t *edata, size_t npages) {
138 mem_contents_t *contents = (mem_contents_t *)edata_base_get(edata);
139 contents->my_addr = (uintptr_t)edata_base_get(edata);
140 contents->size = edata_size_get(edata);
141 contents->my_edata = edata;
142 mem_tree_insert(tree, contents);
143 node_check(tree, contents);
144}
145
/* Checks a node one last time, then removes it from the tree. */
static void
node_remove(mem_tree_t *tree, edata_t *edata) {
	mem_contents_t *contents = (mem_contents_t *)edata_base_get(edata);
	node_check(tree, contents);
	mem_tree_remove(tree, contents);
}
152
153TEST_BEGIN(test_stress) {
154 test_skip_if(!hpa_supported());
155
156 hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
157 &test_hpa_shard_opts_default);
158
159 tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
160
161 const size_t nlive_edatas_max = 500;
162 size_t nlive_edatas = 0;
163 edata_t **live_edatas = calloc(nlive_edatas_max, sizeof(edata_t *));
164 /*
165 * Nothing special about this constant; we're only fixing it for
166 * consistency across runs.
167 */
168 size_t prng_state = (size_t)0x76999ffb014df07c;
169
170 mem_tree_t tree;
171 mem_tree_new(&tree);
172
173 bool deferred_work_generated = false;
174
175 for (size_t i = 0; i < 100 * 1000; i++) {
176 size_t operation = prng_range_zu(&prng_state, 2);
177 if (operation == 0) {
178 /* Alloc */
179 if (nlive_edatas == nlive_edatas_max) {
180 continue;
181 }
182
183 /*
184 * We make sure to get an even balance of small and
185 * large allocations.
186 */
187 size_t npages_min = 1;
188 size_t npages_max = ALLOC_MAX / PAGE;
189 size_t npages = npages_min + prng_range_zu(&prng_state,
190 npages_max - npages_min);
191 edata_t *edata = pai_alloc(tsdn, &shard->pai,
192 npages * PAGE, PAGE, false, false, false,
193 &deferred_work_generated);
194 assert_ptr_not_null(edata,
195 "Unexpected allocation failure");
196 live_edatas[nlive_edatas] = edata;
197 nlive_edatas++;
198 node_insert(&tree, edata, npages);
199 } else {
200 /* Free. */
201 if (nlive_edatas == 0) {
202 continue;
203 }
204 size_t victim = prng_range_zu(&prng_state, nlive_edatas);
205 edata_t *to_free = live_edatas[victim];
206 live_edatas[victim] = live_edatas[nlive_edatas - 1];
207 nlive_edatas--;
208 node_remove(&tree, to_free);
209 pai_dalloc(tsdn, &shard->pai, to_free,
210 &deferred_work_generated);
211 }
212 }
213
214 size_t ntreenodes = 0;
215 for (mem_contents_t *contents = mem_tree_first(&tree); contents != NULL;
216 contents = mem_tree_next(&tree, contents)) {
217 ntreenodes++;
218 node_check(&tree, contents);
219 }
220 expect_zu_eq(ntreenodes, nlive_edatas, "");
221
222 /*
223 * Test hpa_shard_destroy, which requires as a precondition that all its
224 * extents have been deallocated.
225 */
226 for (size_t i = 0; i < nlive_edatas; i++) {
227 edata_t *to_free = live_edatas[i];
228 node_remove(&tree, to_free);
229 pai_dalloc(tsdn, &shard->pai, to_free,
230 &deferred_work_generated);
231 }
232 hpa_shard_destroy(tsdn, shard);
233
234 free(live_edatas);
235 destroy_test_data(shard);
236}
237TEST_END
238
239static void
240expect_contiguous(edata_t **edatas, size_t nedatas) {
241 for (size_t i = 0; i < nedatas; i++) {
242 size_t expected = (size_t)edata_base_get(edatas[0])
243 + i * PAGE;
244 expect_zu_eq(expected, (size_t)edata_base_get(edatas[i]),
245 "Mismatch at index %zu", i);
246 }
247}
248
/*
 * Checks that the single-allocation and batch paths interoperate: mixed
 * alloc methods still produce contiguous memory, and after freeing (again by
 * a mix of methods) the same addresses are reused.
 */
TEST_BEGIN(test_alloc_dalloc_batch) {
	test_skip_if(!hpa_supported());

	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
	    &test_hpa_shard_opts_default);
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());

	bool deferred_work_generated = false;

	enum {NALLOCS = 8};

	edata_t *allocs[NALLOCS];
	/*
	 * Allocate a mix of ways; first half from regular alloc, second half
	 * from alloc_batch.
	 */
	for (size_t i = 0; i < NALLOCS / 2; i++) {
		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false,
		    /* frequent_reuse */ false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	edata_list_active_t allocs_list;
	edata_list_active_init(&allocs_list);
	size_t nsuccess = pai_alloc_batch(tsdn, &shard->pai, PAGE, NALLOCS / 2,
	    &allocs_list, &deferred_work_generated);
	expect_zu_eq(NALLOCS / 2, nsuccess, "Unexpected oom");
	/* Drain the batch results into the same array, preserving order. */
	for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
		allocs[i] = edata_list_active_first(&allocs_list);
		edata_list_active_remove(&allocs_list, allocs[i]);
	}

	/*
	 * Should have allocated them contiguously, despite the differing
	 * methods used.
	 */
	void *orig_base = edata_base_get(allocs[0]);
	expect_contiguous(allocs, NALLOCS);

	/*
	 * Batch dalloc the first half, individually deallocate the second half.
	 */
	for (size_t i = 0; i < NALLOCS / 2; i++) {
		edata_list_active_append(&allocs_list, allocs[i]);
	}
	pai_dalloc_batch(tsdn, &shard->pai, &allocs_list,
	    &deferred_work_generated);
	for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
		pai_dalloc(tsdn, &shard->pai, allocs[i],
		    &deferred_work_generated);
	}

	/* Reallocate (individually), and ensure reuse and contiguity. */
	for (size_t i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure.");
	}
	void *new_base = edata_base_get(allocs[0]);
	expect_ptr_eq(orig_base, new_base,
	    "Failed to reuse the allocated memory.");
	expect_contiguous(allocs, NALLOCS);

	destroy_test_data(shard);
}
TEST_END
316
317static uintptr_t defer_bump_ptr = HUGEPAGE * 123;
318static void *
319defer_test_map(size_t size) {
320 void *result = (void *)defer_bump_ptr;
321 defer_bump_ptr += size;
322 return result;
323}
324
/* Fake unmap hook: a deliberate no-op; the test never reuses the space. */
static void
defer_test_unmap(void *ptr, size_t size) {
	(void)size;
	(void)ptr;
}
330
/* Fake purge hook: records that a purge happened; touches no memory. */
static bool defer_purge_called = false;
static void
defer_test_purge(void *ptr, size_t size) {
	(void)size;
	(void)ptr;
	defer_purge_called = true;
}
338
/*
 * Fake hugify hook: records the request.  Fixed: cast the unused parameters
 * to void, matching defer_test_unmap()/defer_test_purge() and silencing
 * -Wunused-parameter.
 */
static bool defer_hugify_called = false;
static void
defer_test_hugify(void *ptr, size_t size) {
	(void)ptr;
	(void)size;
	defer_hugify_called = true;
}
344
/*
 * Fake dehugify hook: records the request.  Fixed: cast the unused
 * parameters to void for consistency with the other hooks and to silence
 * -Wunused-parameter.
 */
static bool defer_dehugify_called = false;
static void
defer_test_dehugify(void *ptr, size_t size) {
	(void)ptr;
	(void)size;
	defer_dehugify_called = true;
}
350
351static nstime_t defer_curtime;
352static void
353defer_test_curtime(nstime_t *r_time, bool first_reading) {
354 *r_time = defer_curtime;
355}
356
357static uint64_t
358defer_test_ms_since(nstime_t *past_time) {
359 return (nstime_ns(&defer_curtime) - nstime_ns(past_time)) / 1000 / 1000;
360}
361
362TEST_BEGIN(test_defer_time) {
363 test_skip_if(!hpa_supported());
364
365 hpa_hooks_t hooks;
366 hooks.map = &defer_test_map;
367 hooks.unmap = &defer_test_unmap;
368 hooks.purge = &defer_test_purge;
369 hooks.hugify = &defer_test_hugify;
370 hooks.dehugify = &defer_test_dehugify;
371 hooks.curtime = &defer_test_curtime;
372 hooks.ms_since = &defer_test_ms_since;
373
374 hpa_shard_opts_t opts = test_hpa_shard_opts_default;
375 opts.deferral_allowed = true;
376
377 hpa_shard_t *shard = create_test_data(&hooks, &opts);
378
379 bool deferred_work_generated = false;
380
381 nstime_init(&defer_curtime, 0);
382 tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
383 edata_t *edatas[HUGEPAGE_PAGES];
384 for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
385 edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
386 false, false, &deferred_work_generated);
387 expect_ptr_not_null(edatas[i], "Unexpected null edata");
388 }
389 hpa_shard_do_deferred_work(tsdn, shard);
390 expect_false(defer_hugify_called, "Hugified too early");
391
392 /* Hugification delay is set to 10 seconds in options. */
393 nstime_init2(&defer_curtime, 11, 0);
394 hpa_shard_do_deferred_work(tsdn, shard);
395 expect_true(defer_hugify_called, "Failed to hugify");
396
397 defer_hugify_called = false;
398
399 /* Purge. Recall that dirty_mult is .25. */
400 for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
401 pai_dalloc(tsdn, &shard->pai, edatas[i],
402 &deferred_work_generated);
403 }
404
405 hpa_shard_do_deferred_work(tsdn, shard);
406
407 expect_false(defer_hugify_called, "Hugified too early");
408 expect_true(defer_dehugify_called, "Should have dehugified");
409 expect_true(defer_purge_called, "Should have purged");
410 defer_hugify_called = false;
411 defer_dehugify_called = false;
412 defer_purge_called = false;
413
414 /*
415 * Refill the page. We now meet the hugification threshold; we should
416 * be marked for pending hugify.
417 */
418 for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
419 edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
420 false, false, &deferred_work_generated);
421 expect_ptr_not_null(edatas[i], "Unexpected null edata");
422 }
423 /*
424 * We would be ineligible for hugification, had we not already met the
425 * threshold before dipping below it.
426 */
427 pai_dalloc(tsdn, &shard->pai, edatas[0],
428 &deferred_work_generated);
429 /* Wait for the threshold again. */
430 nstime_init2(&defer_curtime, 22, 0);
431 hpa_shard_do_deferred_work(tsdn, shard);
432 expect_true(defer_hugify_called, "Hugified too early");
433 expect_false(defer_dehugify_called, "Unexpected dehugify");
434 expect_false(defer_purge_called, "Unexpected purge");
435
436 destroy_test_data(shard);
437}
438TEST_END
439
int
main(void) {
	/*
	 * These trigger unused-function warnings on CI runs, even if declared
	 * with static inline.  (rb_gen emits the full accessor family whether
	 * or not the tests use every function.)
	 */
	(void)mem_tree_empty;
	(void)mem_tree_last;
	(void)mem_tree_search;
	(void)mem_tree_nsearch;
	(void)mem_tree_psearch;
	(void)mem_tree_iter;
	(void)mem_tree_reverse_iter;
	(void)mem_tree_destroy;
	return test_no_reentrancy(
	    test_alloc_max,
	    test_stress,
	    test_alloc_dalloc_batch,
	    test_defer_time);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/hpa_background_thread.c b/examples/redis-unstable/deps/jemalloc/test/unit/hpa_background_thread.c
deleted file mode 100644
index 81c2561..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/hpa_background_thread.c
+++ /dev/null
@@ -1,188 +0,0 @@
1#include "test/jemalloc_test.h"
2#include "test/sleep.h"
3
static void
sleep_for_background_thread_interval() {
	/*
	 * The sleep interval set in our .sh file is 50ms. So it likely will
	 * run if we sleep for four times that.
	 */
	enum { wait_ns = 200 * 1000 * 1000 };
	sleep_ns(wait_ns);
}
12
/*
 * Creates a fresh arena via "arenas.create" and returns its index.
 *
 * Fixed: the call passed newlen == 2 alongside a NULL newp; no write is
 * intended here, so pass 0 rather than relying on mallctl ignoring newlen
 * when newp is NULL.  Also gave the definition a proper (void) prototype.
 */
static unsigned
create_arena(void) {
	unsigned arena_ind;
	size_t sz;

	sz = sizeof(unsigned);
	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
	    0, "Unexpected mallctl() failure");
	return arena_ind;
}
23
/*
 * Returns the total (huge + nonhuge) dirty-page count of the arena's empty
 * HPA slabs, refreshing the stats epoch first so the numbers are current.
 *
 * NOTE(review): mixes je_mallctl and unprefixed mallctl; presumably both
 * resolve to the same symbol in this build — confirm, and unify if so.
 */
static size_t
get_empty_ndirty(unsigned arena_ind) {
	int err;
	size_t ndirty_huge;
	size_t ndirty_nonhuge;
	/* Bump the epoch so stats reads below see fresh values. */
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	err = je_mallctl("epoch", (void *)&epoch, &sz, (void *)&epoch,
	    sizeof(epoch));
	expect_d_eq(0, err, "Unexpected mallctl() failure");

	size_t mib[6];
	size_t miblen = sizeof(mib)/sizeof(mib[0]);
	err = mallctlnametomib(
	    "stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge", mib,
	    &miblen);
	expect_d_eq(0, err, "Unexpected mallctlnametomib() failure");

	sz = sizeof(ndirty_nonhuge);
	/* mib[2] is the arena index slot (the "0" in the name above). */
	mib[2] = arena_ind;
	err = mallctlbymib(mib, miblen, &ndirty_nonhuge, &sz, NULL, 0);
	expect_d_eq(0, err, "Unexpected mallctlbymib() failure");

	err = mallctlnametomib(
	    "stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge", mib,
	    &miblen);
	expect_d_eq(0, err, "Unexpected mallctlnametomib() failure");

	sz = sizeof(ndirty_huge);
	mib[2] = arena_ind;
	err = mallctlbymib(mib, miblen, &ndirty_huge, &sz, NULL, 0);
	expect_d_eq(0, err, "Unexpected mallctlbymib() failure");

	return ndirty_huge + ndirty_nonhuge;
}
59
/* Toggles the global background thread via the "background_thread" ctl. */
static void
set_background_thread_enabled(bool enabled) {
	int err;
	err = je_mallctl("background_thread", NULL, NULL, &enabled,
	    sizeof(enabled));
	expect_d_eq(0, err, "Unexpected mallctl failure");
}
67
68static void
69wait_until_thread_is_enabled(unsigned arena_id) {
70 tsd_t* tsd = tsd_fetch();
71
72 bool sleeping = false;
73 int iterations = 0;
74 do {
75 background_thread_info_t *info =
76 background_thread_info_get(arena_id);
77 malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
78 malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
79 sleeping = background_thread_indefinite_sleep(info);
80 assert_d_lt(iterations, UINT64_C(1000000),
81 "Waiting for a thread to start for too long");
82 } while (!sleeping);
83}
84
/*
 * Allocates and frees single pages in the given arena and checks the dirty
 * accounting: with deferred purging we should (at least once) observe a
 * dirty page that the background thread later purges; without it, frees must
 * purge eagerly and no dirty page should ever be visible.
 */
static void
expect_purging(unsigned arena_ind, bool expect_deferred) {
	size_t empty_ndirty;

	empty_ndirty = get_empty_ndirty(arena_ind);
	expect_zu_eq(0, empty_ndirty, "Expected arena to start unused.");

	/*
	 * It's possible that we get unlucky with our stats collection timing,
	 * and the background thread runs in between the deallocation and the
	 * stats collection. So we retry 10 times, and see if we *ever* see
	 * deferred reclamation.
	 */
	bool observed_dirty_page = false;
	for (int i = 0; i < 10; i++) {
		void *ptr = mallocx(PAGE,
		    MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind));
		empty_ndirty = get_empty_ndirty(arena_ind);
		expect_zu_eq(0, empty_ndirty, "All pages should be active");
		dallocx(ptr, MALLOCX_TCACHE_NONE);
		empty_ndirty = get_empty_ndirty(arena_ind);
		if (expect_deferred) {
			/* opt_prof can legitimately add an extra dirty page. */
			expect_true(empty_ndirty == 0 || empty_ndirty == 1 ||
			    opt_prof, "Unexpected extra dirty page count: %zu",
			    empty_ndirty);
		} else {
			assert_zu_eq(0, empty_ndirty,
			    "Saw dirty pages without deferred purging");
		}
		if (empty_ndirty > 0) {
			observed_dirty_page = true;
			break;
		}
	}
	expect_b_eq(expect_deferred, observed_dirty_page, "");

	/*
	 * Under high concurrency / heavy test load (e.g. using run_test.sh),
	 * the background thread may not get scheduled for a longer period of
	 * time. Retry 100 times max before bailing out.
	 */
	unsigned retry = 0;
	while ((empty_ndirty = get_empty_ndirty(arena_ind)) > 0 &&
	    expect_deferred && (retry++ < 100)) {
		sleep_for_background_thread_interval();
	}

	expect_zu_eq(0, empty_ndirty, "Should have seen a background purge");
}
134
/*
 * With the background thread on (enabled in main()), frees in a fresh arena
 * should show up as deferred dirty pages that later get purged.
 */
TEST_BEGIN(test_hpa_background_thread_purges) {
	test_skip_if(!config_stats);
	test_skip_if(!hpa_supported());
	test_skip_if(!have_background_thread);
	/* Skip since guarded pages cannot be allocated from hpa. */
	test_skip_if(san_guard_enabled());

	unsigned arena_ind = create_arena();
	/*
	 * Our .sh sets dirty mult to 0, so all dirty pages should get purged
	 * any time any thread frees.
	 */
	expect_purging(arena_ind, /* expect_deferred */ true);
}
TEST_END
150
/*
 * Toggling the background thread must switch between eager purging (thread
 * off: no dirty pages ever visible) and deferred purging (thread on).
 */
TEST_BEGIN(test_hpa_background_thread_enable_disable) {
	test_skip_if(!config_stats);
	test_skip_if(!hpa_supported());
	test_skip_if(!have_background_thread);
	/* Skip since guarded pages cannot be allocated from hpa. */
	test_skip_if(san_guard_enabled());

	unsigned arena_ind = create_arena();

	set_background_thread_enabled(false);
	expect_purging(arena_ind, false);

	set_background_thread_enabled(true);
	/* Don't measure until the thread has actually gone to sleep. */
	wait_until_thread_is_enabled(arena_ind);
	expect_purging(arena_ind, true);
}
TEST_END
168
int
main(void) {
	/*
	 * OK, this is a sort of nasty hack. We don't want to add *another*
	 * config option for HPA (the intent is that it becomes available on
	 * more platforms over time, and we're trying to prune back config
	 * options generally. But we'll get initialization errors on other
	 * platforms if we set hpa:true in the MALLOC_CONF (even if we set
	 * abort_conf:false as well). So we reach into the internals and set
	 * them directly, but only if we know that we're actually going to do
	 * something nontrivial in the tests.
	 */
	if (config_stats && hpa_supported() && have_background_thread) {
		opt_hpa = true;
		opt_background_thread = true;
	}
	/* Matches the same skip conditions checked inside each test. */
	return test_no_reentrancy(
	    test_hpa_background_thread_purges,
	    test_hpa_background_thread_enable_disable);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/hpa_background_thread.sh b/examples/redis-unstable/deps/jemalloc/test/unit/hpa_background_thread.sh
deleted file mode 100644
index 65a56a0..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/hpa_background_thread.sh
+++ /dev/null
@@ -1,4 +0,0 @@
#!/bin/sh

# dirty mult 0 so every free leaves purgeable pages; 50ms purge interval
# (matched by sleep_for_background_thread_interval() in the test);
# hpa_sec_nshards:0 presumably disables the small-extent cache so requests
# hit the HPA shard directly -- confirm against jemalloc's opt docs.
export MALLOC_CONF="hpa_dirty_mult:0,hpa_min_purge_interval_ms:50,hpa_sec_nshards:0"
4
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/hpdata.c b/examples/redis-unstable/deps/jemalloc/test/unit/hpdata.c
deleted file mode 100644
index 288e71d..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/hpdata.c
+++ /dev/null
@@ -1,244 +0,0 @@
1#include "test/jemalloc_test.h"
2
/* Arbitrary base address and age for the hpdata under test. */
#define HPDATA_ADDR ((void *)(10 * HUGEPAGE))
#define HPDATA_AGE 123
5
/*
 * Exercise hpdata_reserve_alloc()/hpdata_unreserve(): first-fit page
 * allocation, longest-free-range accounting, and coalescing of adjacent
 * free ranges.
 */
TEST_BEGIN(test_reserve_alloc) {
	hpdata_t hpdata;
	hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);

	/* Allocating a page at a time, we should do first fit. */
	for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
		expect_true(hpdata_consistent(&hpdata), "");
		expect_zu_eq(HUGEPAGE_PAGES - i,
		    hpdata_longest_free_range_get(&hpdata), "");
		void *alloc = hpdata_reserve_alloc(&hpdata, PAGE);
		expect_ptr_eq((char *)HPDATA_ADDR + i * PAGE, alloc, "");
		expect_true(hpdata_consistent(&hpdata), "");
	}
	/* Hugepage is now fully reserved: no free range remains. */
	expect_true(hpdata_consistent(&hpdata), "");
	expect_zu_eq(0, hpdata_longest_free_range_get(&hpdata), "");

	/*
	 * Build up a bigger free-range, 2 pages at a time, until we've got 6
	 * adjacent free pages total. Pages 8-13 should be unreserved after
	 * this.
	 */
	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 10 * PAGE, 2 * PAGE);
	expect_true(hpdata_consistent(&hpdata), "");
	expect_zu_eq(2, hpdata_longest_free_range_get(&hpdata), "");

	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 12 * PAGE, 2 * PAGE);
	expect_true(hpdata_consistent(&hpdata), "");
	expect_zu_eq(4, hpdata_longest_free_range_get(&hpdata), "");

	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 8 * PAGE, 2 * PAGE);
	expect_true(hpdata_consistent(&hpdata), "");
	expect_zu_eq(6, hpdata_longest_free_range_get(&hpdata), "");

	/*
	 * Leave page 14 reserved, but free page 15 (this test the case where
	 * unreserving combines two ranges).
	 */
	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 15 * PAGE, PAGE);
	/*
	 * Longest free range shouldn't change; we've got a free range of size
	 * 6, then a reserved page, then another free range.
	 */
	expect_true(hpdata_consistent(&hpdata), "");
	expect_zu_eq(6, hpdata_longest_free_range_get(&hpdata), "");

	/* After freeing page 14, the two ranges get combined. */
	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 14 * PAGE, PAGE);
	expect_true(hpdata_consistent(&hpdata), "");
	expect_zu_eq(8, hpdata_longest_free_range_get(&hpdata), "");
}
TEST_END
57
/*
 * Basic purge cycle: reserve half the hugepage, free a quarter, and check
 * that begin/next/end purges exactly that dirty quarter.
 */
TEST_BEGIN(test_purge_simple) {
	hpdata_t hpdata;
	hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);

	void *alloc = hpdata_reserve_alloc(&hpdata, HUGEPAGE_PAGES / 2 * PAGE);
	expect_ptr_eq(alloc, HPDATA_ADDR, "");

	/* Create HUGEPAGE_PAGES / 4 dirty inactive pages at the beginning. */
	hpdata_unreserve(&hpdata, alloc, HUGEPAGE_PAGES / 4 * PAGE);

	expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 2, "");

	/* Allocation must be disallowed during a purge (precondition). */
	hpdata_alloc_allowed_set(&hpdata, false);
	hpdata_purge_state_t purge_state;
	size_t to_purge = hpdata_purge_begin(&hpdata, &purge_state);
	expect_zu_eq(HUGEPAGE_PAGES / 4, to_purge, "");

	void *purge_addr;
	size_t purge_size;
	/* Exactly one purge range: the freed quarter at the start. */
	bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
	    &purge_size);
	expect_true(got_result, "");
	expect_ptr_eq(HPDATA_ADDR, purge_addr, "");
	expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");

	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
	    &purge_size);
	expect_false(got_result, "Unexpected additional purge range: "
	    "extent at %p of size %zu", purge_addr, purge_size);

	/* After the purge, only the still-reserved quarter remains touched. */
	hpdata_purge_end(&hpdata, &purge_state);
	expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 4, "");
}
TEST_END
92
/*
 * We only test intervening dalloc's not intervening allocs; the latter are
 * disallowed as a purging precondition (because they interfere with purging
 * across a retained extent, saving a purge call).
 */
TEST_BEGIN(test_purge_intervening_dalloc) {
	hpdata_t hpdata;
	hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);

	/* Allocate the first 3/4 of the pages. */
	void *alloc = hpdata_reserve_alloc(&hpdata, 3 * HUGEPAGE_PAGES / 4 * PAGE);
	expect_ptr_eq(alloc, HPDATA_ADDR, "");

	/* Free the first 1/4 and the third 1/4 of the pages. */
	hpdata_unreserve(&hpdata, alloc, HUGEPAGE_PAGES / 4 * PAGE);
	hpdata_unreserve(&hpdata,
	    (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE),
	    HUGEPAGE_PAGES / 4 * PAGE);

	expect_zu_eq(hpdata_ntouched_get(&hpdata), 3 * HUGEPAGE_PAGES / 4, "");

	hpdata_alloc_allowed_set(&hpdata, false);
	hpdata_purge_state_t purge_state;
	size_t to_purge = hpdata_purge_begin(&hpdata, &purge_state);
	expect_zu_eq(HUGEPAGE_PAGES / 2, to_purge, "");

	void *purge_addr;
	size_t purge_size;
	/* First purge. */
	bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
	    &purge_size);
	expect_true(got_result, "");
	expect_ptr_eq(HPDATA_ADDR, purge_addr, "");
	expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");

	/* Deallocate the second 1/4 before the second purge occurs. */
	hpdata_unreserve(&hpdata,
	    (void *)((uintptr_t)alloc + 1 * HUGEPAGE_PAGES / 4 * PAGE),
	    HUGEPAGE_PAGES / 4 * PAGE);

	/* Now continue purging. */
	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
	    &purge_size);
	expect_true(got_result, "");
	expect_ptr_eq(
	    (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE),
	    purge_addr, "");
	/*
	 * Note: _ge, not _eq -- the range freed mid-purge need not be picked
	 * up by this purge pass.
	 */
	expect_zu_ge(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");

	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
	    &purge_size);
	expect_false(got_result, "Unexpected additional purge range: "
	    "extent at %p of size %zu", purge_addr, purge_size);

	hpdata_purge_end(&hpdata, &purge_state);

	expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 4, "");
}
TEST_END
152
/*
 * Check that a second purge pass may span an already-purged (retained)
 * range: re-purging retained pages is safe and lets adjacent dirty ranges
 * be purged with a single call.
 */
TEST_BEGIN(test_purge_over_retained) {
	void *purge_addr;
	size_t purge_size;

	hpdata_t hpdata;
	hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);

	/* Allocate the first 3/4 of the pages. */
	void *alloc = hpdata_reserve_alloc(&hpdata, 3 * HUGEPAGE_PAGES / 4 * PAGE);
	expect_ptr_eq(alloc, HPDATA_ADDR, "");

	/* Free the second quarter. */
	void *second_quarter =
	    (void *)((uintptr_t)alloc + HUGEPAGE_PAGES / 4 * PAGE);
	hpdata_unreserve(&hpdata, second_quarter, HUGEPAGE_PAGES / 4 * PAGE);

	expect_zu_eq(hpdata_ntouched_get(&hpdata), 3 * HUGEPAGE_PAGES / 4, "");

	/* Purge the second quarter. */
	hpdata_alloc_allowed_set(&hpdata, false);
	hpdata_purge_state_t purge_state;
	size_t to_purge_dirty = hpdata_purge_begin(&hpdata, &purge_state);
	expect_zu_eq(HUGEPAGE_PAGES / 4, to_purge_dirty, "");

	bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
	    &purge_size);
	expect_true(got_result, "");
	expect_ptr_eq(second_quarter, purge_addr, "");
	expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");

	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
	    &purge_size);
	expect_false(got_result, "Unexpected additional purge range: "
	    "extent at %p of size %zu", purge_addr, purge_size);
	hpdata_purge_end(&hpdata, &purge_state);

	/* Second quarter purged: half the hugepage is still touched. */
	expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 2, "");

	/* Free the first and third quarter. */
	hpdata_unreserve(&hpdata, HPDATA_ADDR, HUGEPAGE_PAGES / 4 * PAGE);
	hpdata_unreserve(&hpdata,
	    (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE),
	    HUGEPAGE_PAGES / 4 * PAGE);

	/*
	 * Purge again. The second quarter is retained, so we can safely
	 * re-purge it. We expect a single purge of 3/4 of the hugepage,
	 * purging half its pages.
	 */
	to_purge_dirty = hpdata_purge_begin(&hpdata, &purge_state);
	expect_zu_eq(HUGEPAGE_PAGES / 2, to_purge_dirty, "");

	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
	    &purge_size);
	expect_true(got_result, "");
	expect_ptr_eq(HPDATA_ADDR, purge_addr, "");
	expect_zu_eq(3 * HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");

	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
	    &purge_size);
	expect_false(got_result, "Unexpected additional purge range: "
	    "extent at %p of size %zu", purge_addr, purge_size);
	hpdata_purge_end(&hpdata, &purge_state);

	expect_zu_eq(hpdata_ntouched_get(&hpdata), 0, "");
}
TEST_END
220
/*
 * Hugifying touches the whole hugepage, so ntouched should jump from the
 * reserved half to the full page count.
 */
TEST_BEGIN(test_hugify) {
	hpdata_t hpdata;
	hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);

	void *alloc = hpdata_reserve_alloc(&hpdata, HUGEPAGE / 2);
	expect_ptr_eq(alloc, HPDATA_ADDR, "");

	expect_zu_eq(HUGEPAGE_PAGES / 2, hpdata_ntouched_get(&hpdata), "");

	hpdata_hugify(&hpdata);

	/* Hugeifying should have increased the dirty page count. */
	expect_zu_eq(HUGEPAGE_PAGES, hpdata_ntouched_get(&hpdata), "");
}
TEST_END
236
/* Run all hpdata unit tests without the reentrancy harness. */
int main(void) {
	return test_no_reentrancy(
	    test_reserve_alloc,
	    test_purge_simple,
	    test_purge_intervening_dalloc,
	    test_purge_over_retained,
	    test_hugify);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/huge.c b/examples/redis-unstable/deps/jemalloc/test/unit/huge.c
deleted file mode 100644
index ec64e50..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/huge.c
+++ /dev/null
@@ -1,108 +0,0 @@
1#include "test/jemalloc_test.h"
2
/* Threshold: 2 << 20 = 2097152. */
const char *malloc_conf = "oversize_threshold:2097152";

/* HUGE_SZ equals the oversize threshold above; SMALL_SZ is a tiny size. */
#define HUGE_SZ (2 << 20)
#define SMALL_SZ (8)
8
/*
 * When the thread is bound to a manual arena, huge allocations stay in
 * that arena; once unbound, huge allocations go to the dedicated huge
 * arena rather than arena 0 or the manual arena.
 */
TEST_BEGIN(huge_bind_thread) {
	unsigned arena1, arena2;
	size_t sz = sizeof(unsigned);

	/* Bind to a manual arena. */
	expect_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
	    "Failed to create arena");
	expect_d_eq(mallctl("thread.arena", NULL, NULL, &arena1,
	    sizeof(arena1)), 0, "Fail to bind thread");

	void *ptr = mallocx(HUGE_SZ, 0);
	expect_ptr_not_null(ptr, "Fail to allocate huge size");
	expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
	    sizeof(ptr)), 0, "Unexpected mallctl() failure");
	expect_u_eq(arena1, arena2, "Wrong arena used after binding");
	dallocx(ptr, 0);

	/* Switch back to arena 0. */
	test_skip_if(have_percpu_arena &&
	    PERCPU_ARENA_ENABLED(opt_percpu_arena));
	arena2 = 0;
	expect_d_eq(mallctl("thread.arena", NULL, NULL, &arena2,
	    sizeof(arena2)), 0, "Fail to bind thread");
	ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
	expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
	    sizeof(ptr)), 0, "Unexpected mallctl() failure");
	expect_u_eq(arena2, 0, "Wrong arena used after binding");
	dallocx(ptr, MALLOCX_TCACHE_NONE);

	/* Then huge allocation should use the huge arena. */
	ptr = mallocx(HUGE_SZ, 0);
	expect_ptr_not_null(ptr, "Fail to allocate huge size");
	expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
	    sizeof(ptr)), 0, "Unexpected mallctl() failure");
	expect_u_ne(arena2, 0, "Wrong arena used after binding");
	expect_u_ne(arena1, arena2, "Wrong arena used after binding");
	dallocx(ptr, 0);
}
TEST_END
48
49TEST_BEGIN(huge_mallocx) {
50 unsigned arena1, arena2;
51 size_t sz = sizeof(unsigned);
52
53 expect_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
54 "Failed to create arena");
55 void *huge = mallocx(HUGE_SZ, MALLOCX_ARENA(arena1));
56 expect_ptr_not_null(huge, "Fail to allocate huge size");
57 expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge,
58 sizeof(huge)), 0, "Unexpected mallctl() failure");
59 expect_u_eq(arena1, arena2, "Wrong arena used for mallocx");
60 dallocx(huge, MALLOCX_ARENA(arena1));
61
62 void *huge2 = mallocx(HUGE_SZ, 0);
63 expect_ptr_not_null(huge, "Fail to allocate huge size");
64 expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2,
65 sizeof(huge2)), 0, "Unexpected mallctl() failure");
66 expect_u_ne(arena1, arena2,
67 "Huge allocation should not come from the manual arena.");
68 expect_u_ne(arena2, 0,
69 "Huge allocation should not come from the arena 0.");
70 dallocx(huge2, 0);
71}
72TEST_END
73
/*
 * Sizes at or above the oversize threshold go to a dedicated huge arena;
 * smaller sizes (half-huge, small) must come from a different arena.
 */
TEST_BEGIN(huge_allocation) {
	unsigned arena1, arena2;

	void *ptr = mallocx(HUGE_SZ, 0);
	expect_ptr_not_null(ptr, "Fail to allocate huge size");
	size_t sz = sizeof(unsigned);
	expect_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
	    0, "Unexpected mallctl() failure");
	expect_u_gt(arena1, 0, "Huge allocation should not come from arena 0");
	dallocx(ptr, 0);

	/* Below-threshold allocation: should not use the huge arena. */
	ptr = mallocx(HUGE_SZ >> 1, 0);
	expect_ptr_not_null(ptr, "Fail to allocate half huge size");
	expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
	    sizeof(ptr)), 0, "Unexpected mallctl() failure");
	expect_u_ne(arena1, arena2, "Wrong arena used for half huge");
	dallocx(ptr, 0);

	ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
	expect_ptr_not_null(ptr, "Fail to allocate small size");
	expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
	    sizeof(ptr)), 0, "Unexpected mallctl() failure");
	expect_u_ne(arena1, arena2,
	    "Huge and small should be from different arenas");
	dallocx(ptr, 0);
}
TEST_END
101
/* Run the oversize-threshold routing tests. */
int
main(void) {
	return test(
	    huge_allocation,
	    huge_mallocx,
	    huge_bind_thread);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/inspect.c b/examples/redis-unstable/deps/jemalloc/test/unit/inspect.c
deleted file mode 100644
index fe59e59..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/inspect.c
+++ /dev/null
@@ -1,278 +0,0 @@
1#include "test/jemalloc_test.h"
2
/*
 * Issue an experimental.utilization.<node> mallctl that is expected to
 * fail with EINVAL, and verify the caller's output buffer was untouched.
 * NOTE: relies on out, out_sz, out_ref, out_sz_ref being in scope at the
 * expansion site.
 */
#define TEST_UTIL_EINVAL(node, a, b, c, d, why_inval) do {		\
	assert_d_eq(mallctl("experimental.utilization." node,		\
	    a, b, c, d), EINVAL, "Should fail when " why_inval);	\
	assert_zu_eq(out_sz, out_sz_ref,				\
	    "Output size touched when given invalid arguments");	\
	assert_d_eq(memcmp(out, out_ref, out_sz_ref), 0,		\
	    "Output content touched when given invalid arguments");	\
} while (0)

/* Single-pointer and batch variants of the EINVAL check above. */
#define TEST_UTIL_QUERY_EINVAL(a, b, c, d, why_inval) \
	TEST_UTIL_EINVAL("query", a, b, c, d, why_inval)
#define TEST_UTIL_BATCH_EINVAL(a, b, c, d, why_inval) \
	TEST_UTIL_EINVAL("batch_query", a, b, c, d, why_inval)

/*
 * Successful-call check: mallctl returns 0 and actually writes the output
 * buffer (content differs from the reference copy).
 */
#define TEST_UTIL_VALID(node) do {					\
	assert_d_eq(mallctl("experimental.utilization." node,		\
	    out, &out_sz, in, in_sz), 0,				\
	    "Should return 0 on correct arguments");			\
	expect_zu_eq(out_sz, out_sz_ref, "incorrect output size");	\
	expect_d_ne(memcmp(out, out_ref, out_sz_ref), 0,		\
	    "Output content should be changed");			\
} while (0)

#define TEST_UTIL_BATCH_VALID TEST_UTIL_VALID("batch_query")

/* Upper bound on tested allocation sizes (1 MiB). */
#define TEST_MAX_SIZE (1 << 20)
29
/*
 * Exercise experimental.utilization.query across small and large size
 * classes: invalid-argument handling, then sanity relations between the
 * returned slab pointer, free/region counts, extent size, and bin stats.
 * Output layout: one void * (current slab) followed by five size_t
 * counters (see the *_READ macros below).
 */
TEST_BEGIN(test_query) {
	size_t sz;
	/*
	 * Select some sizes that can span both small and large sizes, and are
	 * numerically unrelated to any size boundaries.
	 */
	for (sz = 7; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS;
	    sz += (sz <= SC_SMALL_MAXCLASS ? 1009 : 99989)) {
		void *p = mallocx(sz, 0);
		void **in = &p;
		size_t in_sz = sizeof(const void *);
		size_t out_sz = sizeof(void *) + sizeof(size_t) * 5;
		void *out = mallocx(out_sz, 0);
		void *out_ref = mallocx(out_sz, 0);
		size_t out_sz_ref = out_sz;

		assert_ptr_not_null(p,
		    "test pointer allocation failed");
		assert_ptr_not_null(out,
		    "test output allocation failed");
		assert_ptr_not_null(out_ref,
		    "test reference output allocation failed");

/* Accessors for the query output buffer's fields. */
#define SLABCUR_READ(out) (*(void **)out)
#define COUNTS(out) ((size_t *)((void **)out + 1))
#define NFREE_READ(out) COUNTS(out)[0]
#define NREGS_READ(out) COUNTS(out)[1]
#define SIZE_READ(out) COUNTS(out)[2]
#define BIN_NFREE_READ(out) COUNTS(out)[3]
#define BIN_NREGS_READ(out) COUNTS(out)[4]

		/* Fill with sentinels so untouched output is detectable. */
		SLABCUR_READ(out) = NULL;
		NFREE_READ(out) = NREGS_READ(out) = SIZE_READ(out) = -1;
		BIN_NFREE_READ(out) = BIN_NREGS_READ(out) = -1;
		memcpy(out_ref, out, out_sz);

		/* Test invalid argument(s) errors */
		TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz,
		    "old is NULL");
		TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz,
		    "oldlenp is NULL");
		TEST_UTIL_QUERY_EINVAL(out, &out_sz, NULL, in_sz,
		    "newp is NULL");
		TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0,
		    "newlen is zero");
		in_sz -= 1;
		TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz,
		    "invalid newlen");
		in_sz += 1;
		out_sz_ref = out_sz -= 2 * sizeof(size_t);
		TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz,
		    "invalid *oldlenp");
		out_sz_ref = out_sz += 2 * sizeof(size_t);

		/* Examine output for valid call */
		TEST_UTIL_VALID("query");
		expect_zu_le(sz, SIZE_READ(out),
		    "Extent size should be at least allocation size");
		expect_zu_eq(SIZE_READ(out) & (PAGE - 1), 0,
		    "Extent size should be a multiple of page size");

		/*
		 * We don't do much bin checking if prof is on, since profiling
		 * can produce extents that are for small size classes but not
		 * slabs, which interferes with things like region counts.
		 */
		if (!opt_prof && sz <= SC_SMALL_MAXCLASS) {
			expect_zu_le(NFREE_READ(out), NREGS_READ(out),
			    "Extent free count exceeded region count");
			expect_zu_le(NREGS_READ(out), SIZE_READ(out),
			    "Extent region count exceeded size");
			expect_zu_ne(NREGS_READ(out), 0,
			    "Extent region count must be positive");
			expect_true(NFREE_READ(out) == 0 || (SLABCUR_READ(out)
			    != NULL && SLABCUR_READ(out) <= p),
			    "Allocation should follow first fit principle");

			if (config_stats) {
				expect_zu_le(BIN_NFREE_READ(out),
				    BIN_NREGS_READ(out),
				    "Bin free count exceeded region count");
				expect_zu_ne(BIN_NREGS_READ(out), 0,
				    "Bin region count must be positive");
				expect_zu_le(NFREE_READ(out),
				    BIN_NFREE_READ(out),
				    "Extent free count exceeded bin free count");
				expect_zu_le(NREGS_READ(out),
				    BIN_NREGS_READ(out),
				    "Extent region count exceeded "
				    "bin region count");
				expect_zu_eq(BIN_NREGS_READ(out)
				    % NREGS_READ(out), 0,
				    "Bin region count isn't a multiple of "
				    "extent region count");
				expect_zu_le(
				    BIN_NFREE_READ(out) - NFREE_READ(out),
				    BIN_NREGS_READ(out) - NREGS_READ(out),
				    "Free count in other extents in the bin "
				    "exceeded region count in other extents "
				    "in the bin");
				expect_zu_le(NREGS_READ(out) - NFREE_READ(out),
				    BIN_NREGS_READ(out) - BIN_NFREE_READ(out),
				    "Extent utilized count exceeded "
				    "bin utilized count");
			}
		} else if (sz > SC_SMALL_MAXCLASS) {
			/* Large allocations are single-region, non-slab. */
			expect_zu_eq(NFREE_READ(out), 0,
			    "Extent free count should be zero");
			expect_zu_eq(NREGS_READ(out), 1,
			    "Extent region count should be one");
			expect_ptr_null(SLABCUR_READ(out),
			    "Current slab must be null for large size classes");
			if (config_stats) {
				expect_zu_eq(BIN_NFREE_READ(out), 0,
				    "Bin free count must be zero for "
				    "large sizes");
				expect_zu_eq(BIN_NREGS_READ(out), 0,
				    "Bin region count must be zero for "
				    "large sizes");
			}
		}

#undef BIN_NREGS_READ
#undef BIN_NFREE_READ
#undef SIZE_READ
#undef NREGS_READ
#undef NFREE_READ
#undef COUNTS
#undef SLABCUR_READ

		free(out_ref);
		free(out);
		free(p);
	}
}
TEST_END
166
167TEST_BEGIN(test_batch) {
168 size_t sz;
169 /*
170 * Select some sizes that can span both small and large sizes, and are
171 * numerically unrelated to any size boundaries.
172 */
173 for (sz = 17; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS;
174 sz += (sz <= SC_SMALL_MAXCLASS ? 1019 : 99991)) {
175 void *p = mallocx(sz, 0);
176 void *q = mallocx(sz, 0);
177 void *in[] = {p, q};
178 size_t in_sz = sizeof(const void *) * 2;
179 size_t out[] = {-1, -1, -1, -1, -1, -1};
180 size_t out_sz = sizeof(size_t) * 6;
181 size_t out_ref[] = {-1, -1, -1, -1, -1, -1};
182 size_t out_sz_ref = out_sz;
183
184 assert_ptr_not_null(p, "test pointer allocation failed");
185 assert_ptr_not_null(q, "test pointer allocation failed");
186
187 /* Test invalid argument(s) errors */
188 TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz,
189 "old is NULL");
190 TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz,
191 "oldlenp is NULL");
192 TEST_UTIL_BATCH_EINVAL(out, &out_sz, NULL, in_sz,
193 "newp is NULL");
194 TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0,
195 "newlen is zero");
196 in_sz -= 1;
197 TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
198 "newlen is not an exact multiple");
199 in_sz += 1;
200 out_sz_ref = out_sz -= 2 * sizeof(size_t);
201 TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
202 "*oldlenp is not an exact multiple");
203 out_sz_ref = out_sz += 2 * sizeof(size_t);
204 in_sz -= sizeof(const void *);
205 TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
206 "*oldlenp and newlen do not match");
207 in_sz += sizeof(const void *);
208
209 /* Examine output for valid calls */
210#define TEST_EQUAL_REF(i, message) \
211 assert_d_eq(memcmp(out + (i) * 3, out_ref + (i) * 3, 3), 0, message)
212
213#define NFREE_READ(out, i) out[(i) * 3]
214#define NREGS_READ(out, i) out[(i) * 3 + 1]
215#define SIZE_READ(out, i) out[(i) * 3 + 2]
216
217 out_sz_ref = out_sz /= 2;
218 in_sz /= 2;
219 TEST_UTIL_BATCH_VALID;
220 expect_zu_le(sz, SIZE_READ(out, 0),
221 "Extent size should be at least allocation size");
222 expect_zu_eq(SIZE_READ(out, 0) & (PAGE - 1), 0,
223 "Extent size should be a multiple of page size");
224 /*
225 * See the corresponding comment in test_query; profiling breaks
226 * our slab count expectations.
227 */
228 if (sz <= SC_SMALL_MAXCLASS && !opt_prof) {
229 expect_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0),
230 "Extent free count exceeded region count");
231 expect_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0),
232 "Extent region count exceeded size");
233 expect_zu_ne(NREGS_READ(out, 0), 0,
234 "Extent region count must be positive");
235 } else if (sz > SC_SMALL_MAXCLASS) {
236 expect_zu_eq(NFREE_READ(out, 0), 0,
237 "Extent free count should be zero");
238 expect_zu_eq(NREGS_READ(out, 0), 1,
239 "Extent region count should be one");
240 }
241 TEST_EQUAL_REF(1,
242 "Should not overwrite content beyond what's needed");
243 in_sz *= 2;
244 out_sz_ref = out_sz *= 2;
245
246 memcpy(out_ref, out, 3 * sizeof(size_t));
247 TEST_UTIL_BATCH_VALID;
248 TEST_EQUAL_REF(0, "Statistics should be stable across calls");
249 if (sz <= SC_SMALL_MAXCLASS) {
250 expect_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1),
251 "Extent free count exceeded region count");
252 } else {
253 expect_zu_eq(NFREE_READ(out, 0), 0,
254 "Extent free count should be zero");
255 }
256 expect_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1),
257 "Extent region count should be same for same region size");
258 expect_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1),
259 "Extent size should be same for same region size");
260
261#undef SIZE_READ
262#undef NREGS_READ
263#undef NFREE_READ
264
265#undef TEST_EQUAL_REF
266
267 free(q);
268 free(p);
269 }
270}
271TEST_END
272
int
main(void) {
	/* Ensure the size sweep in the tests actually reaches large classes. */
	assert_zu_lt(SC_SMALL_MAXCLASS + 100000, TEST_MAX_SIZE,
	    "Test case cannot cover large classes");
	return test(test_query, test_batch);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/inspect.sh b/examples/redis-unstable/deps/jemalloc/test/unit/inspect.sh
deleted file mode 100644
index 352d110..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/inspect.sh
+++ /dev/null
@@ -1,5 +0,0 @@
#!/bin/sh

# Disable profiling when built with it; profiling produces non-slab small
# extents that break the test's region-count expectations (see inspect.c).
if [ "x${enable_prof}" = "x1" ] ; then
  export MALLOC_CONF="prof:false"
fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/junk.c b/examples/redis-unstable/deps/jemalloc/test/unit/junk.c
deleted file mode 100644
index 543092f..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/junk.c
+++ /dev/null
@@ -1,195 +0,0 @@
1#include "test/jemalloc_test.h"
2
#define arraylen(arr) (sizeof(arr)/sizeof(arr[0]))
/* Number of live entries in ptrs[] (next free slot). */
static size_t ptr_ind;
/* Stash of outstanding test allocations; volatile, presumably to keep the
 * stores observable — TODO confirm. */
static void *volatile ptrs[100];
/* Most recent (pointer, usize) pair reported by the junk callbacks. */
static void *last_junked_ptr;
static size_t last_junked_usize;
8
9static void
10reset() {
11 ptr_ind = 0;
12 last_junked_ptr = NULL;
13 last_junked_usize = 0;
14}
15
16static void
17test_junk(void *ptr, size_t usize) {
18 last_junked_ptr = ptr;
19 last_junked_usize = usize;
20}
21
/*
 * Allocate `size` bytes through every allocation entry point compatible
 * with (zero, lg_align), stashing each result in ptrs[] and verifying the
 * junk-alloc callback reported that allocation (unless zeroed).
 */
static void
do_allocs(size_t size, bool zero, size_t lg_align) {
/* Stash an allocation and check the junk callback saw it. */
#define JUNK_ALLOC(...)							\
	do {								\
		assert(ptr_ind + 1 < arraylen(ptrs));			\
		void *ptr = __VA_ARGS__;				\
		assert_ptr_not_null(ptr, "");				\
		ptrs[ptr_ind++] = ptr;					\
		if (opt_junk_alloc && !zero) {				\
			expect_ptr_eq(ptr, last_junked_ptr, "");	\
			expect_zu_eq(last_junked_usize,			\
			    TEST_MALLOC_SIZE(ptr), "");			\
		}							\
	} while (0)
	/* malloc has no alignment/zero parameters. */
	if (!zero && lg_align == 0) {
		JUNK_ALLOC(malloc(size));
	}
	if (!zero) {
		JUNK_ALLOC(aligned_alloc(1 << lg_align, size));
	}
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
	if (!zero) {
		JUNK_ALLOC(je_memalign(1 << lg_align, size));
	}
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
	/* valloc is always page-aligned; only applicable at LG_PAGE. */
	if (!zero && lg_align == LG_PAGE) {
		JUNK_ALLOC(je_valloc(size));
	}
#endif
	int zero_flag = zero ? MALLOCX_ZERO : 0;
	JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)));
	JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)
	    | MALLOCX_TCACHE_NONE));
	/* posix_memalign requires alignment >= sizeof(void *). */
	if (lg_align >= LG_SIZEOF_PTR) {
		void *memalign_result;
		int err = posix_memalign(&memalign_result, (1 << lg_align),
		    size);
		assert_d_eq(err, 0, "");
		JUNK_ALLOC(memalign_result);
	}
}
64
/*
 * For every (zero, size, lg_align) combination, allocate via all entry
 * points and then free via each deallocation entry point, checking the
 * junk-free callback reported each freed pointer and its usize.
 */
TEST_BEGIN(test_junk_alloc_free) {
	bool zerovals[] = {false, true};
	size_t sizevals[] = {
		1, 8, 100, 1000, 100*1000
		/*
		 * Memory allocation failure is a real possibility in 32-bit mode.
		 * Rather than try to check in the face of resource exhaustion, we just
		 * rely more on the 64-bit tests. This is a little bit white-box-y in
		 * the sense that this is only a good test strategy if we know that the
		 * junk pathways don't touch interact with the allocation selection
		 * mechanisms; but this is in fact the case.
		 */
#if LG_SIZEOF_PTR == 3
		, 10 * 1000 * 1000
#endif
	};
	size_t lg_alignvals[] = {
		0, 4, 10, 15, 16, LG_PAGE
#if LG_SIZEOF_PTR == 3
		, 20, 24
#endif
	};

/* Allocate everything, free each stash entry with the given call, check junk. */
#define JUNK_FREE(...)							\
	do {								\
		do_allocs(size, zero, lg_align);			\
		for (size_t n = 0; n < ptr_ind; n++) {			\
			void *ptr = ptrs[n];				\
			__VA_ARGS__;					\
			if (opt_junk_free) {				\
				assert_ptr_eq(ptr, last_junked_ptr,	\
				    "");				\
				assert_zu_eq(usize, last_junked_usize,	\
				    "");				\
			}						\
			reset();					\
		}							\
	} while (0)
	for (size_t i = 0; i < arraylen(zerovals); i++) {
		for (size_t j = 0; j < arraylen(sizevals); j++) {
			for (size_t k = 0; k < arraylen(lg_alignvals); k++) {
				bool zero = zerovals[i];
				size_t size = sizevals[j];
				size_t lg_align = lg_alignvals[k];
				size_t usize = nallocx(size,
				    MALLOCX_LG_ALIGN(lg_align));

				JUNK_FREE(free(ptr));
				JUNK_FREE(dallocx(ptr, 0));
				JUNK_FREE(dallocx(ptr, MALLOCX_TCACHE_NONE));
				JUNK_FREE(dallocx(ptr, MALLOCX_LG_ALIGN(
				    lg_align)));
				JUNK_FREE(sdallocx(ptr, usize, MALLOCX_LG_ALIGN(
				    lg_align)));
				JUNK_FREE(sdallocx(ptr, usize,
				    MALLOCX_TCACHE_NONE | MALLOCX_LG_ALIGN(lg_align)));
				/* realloc(ptr, 0) frees only under this config. */
				if (opt_zero_realloc_action
				    == zero_realloc_action_free) {
					JUNK_FREE(realloc(ptr, 0));
				}
			}
		}
	}
}
TEST_END
130
/*
 * Growing a small allocation to a large one should junk exactly the newly
 * exposed tail (the bytes past the old usable size), except under
 * MALLOCX_ZERO where no junking should occur.
 */
TEST_BEGIN(test_realloc_expand) {
	char *volatile ptr;
	char *volatile expanded;

	test_skip_if(!opt_junk_alloc);

	/* Realloc */
	ptr = malloc(SC_SMALL_MAXCLASS);
	expanded = realloc(ptr, SC_LARGE_MINCLASS);
	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
	expect_zu_eq(last_junked_usize,
	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
	free(expanded);

	/* rallocx(..., 0) */
	ptr = malloc(SC_SMALL_MAXCLASS);
	expanded = rallocx(ptr, SC_LARGE_MINCLASS, 0);
	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
	expect_zu_eq(last_junked_usize,
	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
	free(expanded);

	/* rallocx(..., nonzero) */
	ptr = malloc(SC_SMALL_MAXCLASS);
	expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
	expect_zu_eq(last_junked_usize,
	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
	free(expanded);

	/* rallocx(..., MALLOCX_ZERO): sentinels must survive (no junking). */
	ptr = malloc(SC_SMALL_MAXCLASS);
	last_junked_ptr = (void *)-1;
	last_junked_usize = (size_t)-1;
	expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_ZERO);
	expect_ptr_eq(last_junked_ptr, (void *)-1, "");
	expect_zu_eq(last_junked_usize, (size_t)-1, "");
	free(expanded);

	/*
	 * Unfortunately, testing xallocx reliably is difficult to do portably
	 * (since allocations can be expanded / not expanded differently on
	 * different platforms. We rely on manual inspection there -- the
	 * xallocx pathway is easy to inspect, though.
	 *
	 * Likewise, we don't test the shrinking pathways. It's difficult to do
	 * so consistently (because of the risk of split failure or memory
	 * exhaustion, in which case no junking should happen). This is fine
	 * -- junking is a best-effort debug mechanism in the first place.
	 */
}
TEST_END
183
int
main(void) {
	/* Install the recording callbacks before any test runs. */
	junk_alloc_callback = &test_junk;
	junk_free_callback = &test_junk;
	/*
	 * We check the last pointer junked. If a reentrant call happens, that
	 * might be an internal allocation.
	 */
	return test_no_reentrancy(
	    test_junk_alloc_free,
	    test_realloc_expand);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/junk.sh b/examples/redis-unstable/deps/jemalloc/test/unit/junk.sh
deleted file mode 100644
index 97cd8ca..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/junk.sh
+++ /dev/null
@@ -1,5 +0,0 @@
#!/bin/sh

# When fill support is compiled in, enable junk filling on both the
# allocation and deallocation paths.
if [ "x${enable_fill}" = "x1" ] ; then
  export MALLOC_CONF="abort:false,zero:false,junk:true"
fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/junk_alloc.c b/examples/redis-unstable/deps/jemalloc/test/unit/junk_alloc.c
deleted file mode 100644
index a442a0c..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/junk_alloc.c
+++ /dev/null
@@ -1 +0,0 @@
1#include "junk.c"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/junk_alloc.sh b/examples/redis-unstable/deps/jemalloc/test/unit/junk_alloc.sh
deleted file mode 100644
index e1008c2..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/junk_alloc.sh
+++ /dev/null
@@ -1,5 +0,0 @@
#!/bin/sh

# When fill support is compiled in, enable junk filling on the allocation
# path only.
if [ "x${enable_fill}" = "x1" ] ; then
  export MALLOC_CONF="abort:false,zero:false,junk:alloc"
fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/junk_free.c b/examples/redis-unstable/deps/jemalloc/test/unit/junk_free.c
deleted file mode 100644
index a442a0c..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/junk_free.c
+++ /dev/null
@@ -1 +0,0 @@
1#include "junk.c"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/junk_free.sh b/examples/redis-unstable/deps/jemalloc/test/unit/junk_free.sh
deleted file mode 100644
index 402196c..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/junk_free.sh
+++ /dev/null
@@ -1,5 +0,0 @@
#!/bin/sh

# When fill support is compiled in, enable junk filling on the
# deallocation path only.
if [ "x${enable_fill}" = "x1" ] ; then
  export MALLOC_CONF="abort:false,zero:false,junk:free"
fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/log.c b/examples/redis-unstable/deps/jemalloc/test/unit/log.c
deleted file mode 100644
index c09b589..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/log.c
+++ /dev/null
@@ -1,198 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/log.h"
4
5static void
6update_log_var_names(const char *names) {
7 strncpy(log_var_names, names, sizeof(log_var_names));
8}
9
10static void
11expect_no_logging(const char *names) {
12 log_var_t log_l1 = LOG_VAR_INIT("l1");
13 log_var_t log_l2 = LOG_VAR_INIT("l2");
14 log_var_t log_l2_a = LOG_VAR_INIT("l2.a");
15
16 update_log_var_names(names);
17
18 int count = 0;
19
20 for (int i = 0; i < 10; i++) {
21 log_do_begin(log_l1)
22 count++;
23 log_do_end(log_l1)
24
25 log_do_begin(log_l2)
26 count++;
27 log_do_end(log_l2)
28
29 log_do_begin(log_l2_a)
30 count++;
31 log_do_end(log_l2_a)
32 }
33 expect_d_eq(count, 0, "Disabled logging not ignored!");
34}
35
36TEST_BEGIN(test_log_disabled) {
37 test_skip_if(!config_log);
38 atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
39 expect_no_logging("");
40 expect_no_logging("abc");
41 expect_no_logging("a.b.c");
42 expect_no_logging("l12");
43 expect_no_logging("l123|a456|b789");
44 expect_no_logging("|||");
45}
46TEST_END
47
48TEST_BEGIN(test_log_enabled_direct) {
49 test_skip_if(!config_log);
50 atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
51 log_var_t log_l1 = LOG_VAR_INIT("l1");
52 log_var_t log_l1_a = LOG_VAR_INIT("l1.a");
53 log_var_t log_l2 = LOG_VAR_INIT("l2");
54
55 int count;
56
57 count = 0;
58 update_log_var_names("l1");
59 for (int i = 0; i < 10; i++) {
60 log_do_begin(log_l1)
61 count++;
62 log_do_end(log_l1)
63 }
64 expect_d_eq(count, 10, "Mis-logged!");
65
66 count = 0;
67 update_log_var_names("l1.a");
68 for (int i = 0; i < 10; i++) {
69 log_do_begin(log_l1_a)
70 count++;
71 log_do_end(log_l1_a)
72 }
73 expect_d_eq(count, 10, "Mis-logged!");
74
75 count = 0;
76 update_log_var_names("l1.a|abc|l2|def");
77 for (int i = 0; i < 10; i++) {
78 log_do_begin(log_l1_a)
79 count++;
80 log_do_end(log_l1_a)
81
82 log_do_begin(log_l2)
83 count++;
84 log_do_end(log_l2)
85 }
86 expect_d_eq(count, 20, "Mis-logged!");
87}
88TEST_END
89
90TEST_BEGIN(test_log_enabled_indirect) {
91 test_skip_if(!config_log);
92 atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
93 update_log_var_names("l0|l1|abc|l2.b|def");
94
95 /* On. */
96 log_var_t log_l1 = LOG_VAR_INIT("l1");
97 /* Off. */
98 log_var_t log_l1a = LOG_VAR_INIT("l1a");
99 /* On. */
100 log_var_t log_l1_a = LOG_VAR_INIT("l1.a");
101 /* Off. */
102 log_var_t log_l2_a = LOG_VAR_INIT("l2.a");
103 /* On. */
104 log_var_t log_l2_b_a = LOG_VAR_INIT("l2.b.a");
105 /* On. */
106 log_var_t log_l2_b_b = LOG_VAR_INIT("l2.b.b");
107
108 /* 4 are on total, so should sum to 40. */
109 int count = 0;
110 for (int i = 0; i < 10; i++) {
111 log_do_begin(log_l1)
112 count++;
113 log_do_end(log_l1)
114
115 log_do_begin(log_l1a)
116 count++;
117 log_do_end(log_l1a)
118
119 log_do_begin(log_l1_a)
120 count++;
121 log_do_end(log_l1_a)
122
123 log_do_begin(log_l2_a)
124 count++;
125 log_do_end(log_l2_a)
126
127 log_do_begin(log_l2_b_a)
128 count++;
129 log_do_end(log_l2_b_a)
130
131 log_do_begin(log_l2_b_b)
132 count++;
133 log_do_end(log_l2_b_b)
134 }
135
136 expect_d_eq(count, 40, "Mis-logged!");
137}
138TEST_END
139
140TEST_BEGIN(test_log_enabled_global) {
141 test_skip_if(!config_log);
142 atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
143 update_log_var_names("abc|.|def");
144
145 log_var_t log_l1 = LOG_VAR_INIT("l1");
146 log_var_t log_l2_a_a = LOG_VAR_INIT("l2.a.a");
147
148 int count = 0;
149 for (int i = 0; i < 10; i++) {
150 log_do_begin(log_l1)
151 count++;
152 log_do_end(log_l1)
153
154 log_do_begin(log_l2_a_a)
155 count++;
156 log_do_end(log_l2_a_a)
157 }
158 expect_d_eq(count, 20, "Mis-logged!");
159}
160TEST_END
161
162TEST_BEGIN(test_logs_if_no_init) {
163 test_skip_if(!config_log);
164 atomic_store_b(&log_init_done, false, ATOMIC_RELAXED);
165
166 log_var_t l = LOG_VAR_INIT("definitely.not.enabled");
167
168 int count = 0;
169 for (int i = 0; i < 10; i++) {
170 log_do_begin(l)
171 count++;
172 log_do_end(l)
173 }
174 expect_d_eq(count, 0, "Logging shouldn't happen if not initialized.");
175}
176TEST_END
177
178/*
179 * This really just checks to make sure that this usage compiles; we don't have
180 * any test code to run.
181 */
182TEST_BEGIN(test_log_only_format_string) {
183 if (false) {
184 LOG("log_str", "No arguments follow this format string.");
185 }
186}
187TEST_END
188
189int
190main(void) {
191 return test(
192 test_log_disabled,
193 test_log_enabled_direct,
194 test_log_enabled_indirect,
195 test_log_enabled_global,
196 test_logs_if_no_init,
197 test_log_only_format_string);
198}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/mallctl.c b/examples/redis-unstable/deps/jemalloc/test/unit/mallctl.c
deleted file mode 100644
index 6efc8f1..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/mallctl.c
+++ /dev/null
@@ -1,1274 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/ctl.h"
4#include "jemalloc/internal/hook.h"
5#include "jemalloc/internal/util.h"
6
7TEST_BEGIN(test_mallctl_errors) {
8 uint64_t epoch;
9 size_t sz;
10
11 expect_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
12 "mallctl() should return ENOENT for non-existent names");
13
14 expect_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
15 EPERM, "mallctl() should return EPERM on attempt to write "
16 "read-only value");
17
18 expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
19 sizeof(epoch)-1), EINVAL,
20 "mallctl() should return EINVAL for input size mismatch");
21 expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
22 sizeof(epoch)+1), EINVAL,
23 "mallctl() should return EINVAL for input size mismatch");
24
25 sz = sizeof(epoch)-1;
26 expect_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
27 "mallctl() should return EINVAL for output size mismatch");
28 sz = sizeof(epoch)+1;
29 expect_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
30 "mallctl() should return EINVAL for output size mismatch");
31}
32TEST_END
33
34TEST_BEGIN(test_mallctlnametomib_errors) {
35 size_t mib[1];
36 size_t miblen;
37
38 miblen = sizeof(mib)/sizeof(size_t);
39 expect_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
40 "mallctlnametomib() should return ENOENT for non-existent names");
41}
42TEST_END
43
44TEST_BEGIN(test_mallctlbymib_errors) {
45 uint64_t epoch;
46 size_t sz;
47 size_t mib[1];
48 size_t miblen;
49
50 miblen = sizeof(mib)/sizeof(size_t);
51 expect_d_eq(mallctlnametomib("version", mib, &miblen), 0,
52 "Unexpected mallctlnametomib() failure");
53
54 expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
55 strlen("0.0.0")), EPERM, "mallctl() should return EPERM on "
56 "attempt to write read-only value");
57
58 miblen = sizeof(mib)/sizeof(size_t);
59 expect_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
60 "Unexpected mallctlnametomib() failure");
61
62 expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
63 sizeof(epoch)-1), EINVAL,
64 "mallctlbymib() should return EINVAL for input size mismatch");
65 expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
66 sizeof(epoch)+1), EINVAL,
67 "mallctlbymib() should return EINVAL for input size mismatch");
68
69 sz = sizeof(epoch)-1;
70 expect_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
71 EINVAL,
72 "mallctlbymib() should return EINVAL for output size mismatch");
73 sz = sizeof(epoch)+1;
74 expect_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
75 EINVAL,
76 "mallctlbymib() should return EINVAL for output size mismatch");
77}
78TEST_END
79
80TEST_BEGIN(test_mallctl_read_write) {
81 uint64_t old_epoch, new_epoch;
82 size_t sz = sizeof(old_epoch);
83
84 /* Blind. */
85 expect_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
86 "Unexpected mallctl() failure");
87 expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
88
89 /* Read. */
90 expect_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0,
91 "Unexpected mallctl() failure");
92 expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
93
94 /* Write. */
95 expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch,
96 sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
97 expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
98
99 /* Read+write. */
100 expect_d_eq(mallctl("epoch", (void *)&old_epoch, &sz,
101 (void *)&new_epoch, sizeof(new_epoch)), 0,
102 "Unexpected mallctl() failure");
103 expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
104}
105TEST_END
106
107TEST_BEGIN(test_mallctlnametomib_short_mib) {
108 size_t mib[4];
109 size_t miblen;
110
111 miblen = 3;
112 mib[3] = 42;
113 expect_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
114 "Unexpected mallctlnametomib() failure");
115 expect_zu_eq(miblen, 3, "Unexpected mib output length");
116 expect_zu_eq(mib[3], 42,
117 "mallctlnametomib() wrote past the end of the input mib");
118}
119TEST_END
120
121TEST_BEGIN(test_mallctlnametomib_short_name) {
122 size_t mib[4];
123 size_t miblen;
124
125 miblen = 4;
126 mib[3] = 42;
127 expect_d_eq(mallctlnametomib("arenas.bin.0", mib, &miblen), 0,
128 "Unexpected mallctlnametomib() failure");
129 expect_zu_eq(miblen, 3, "Unexpected mib output length");
130 expect_zu_eq(mib[3], 42,
131 "mallctlnametomib() wrote past the end of the input mib");
132}
133TEST_END
134
135TEST_BEGIN(test_mallctlmibnametomib) {
136 size_t mib[4];
137 size_t miblen = 4;
138 uint32_t result, result_ref;
139 size_t len_result = sizeof(uint32_t);
140
141 tsd_t *tsd = tsd_fetch();
142
143 /* Error cases */
144 assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "bob", &miblen), ENOENT, "");
145 assert_zu_eq(miblen, 4, "");
146 assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "9999", &miblen), ENOENT, "");
147 assert_zu_eq(miblen, 4, "");
148
149 /* Valid case. */
150 assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "arenas", &miblen), 0, "");
151 assert_zu_eq(miblen, 1, "");
152 miblen = 4;
153 assert_d_eq(ctl_mibnametomib(tsd, mib, 1, "bin", &miblen), 0, "");
154 assert_zu_eq(miblen, 2, "");
155 expect_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
156 ENOENT, "mallctlbymib() should fail on partial path");
157
158 /* Error cases. */
159 miblen = 4;
160 assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "bob", &miblen), ENOENT, "");
161 assert_zu_eq(miblen, 4, "");
162 assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "9999", &miblen), ENOENT, "");
163 assert_zu_eq(miblen, 4, "");
164
165 /* Valid case. */
166 assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "0", &miblen), 0, "");
167 assert_zu_eq(miblen, 3, "");
168 expect_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
169 ENOENT, "mallctlbymib() should fail on partial path");
170
171 /* Error cases. */
172 miblen = 4;
173 assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "bob", &miblen), ENOENT, "");
174 assert_zu_eq(miblen, 4, "");
175 assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "9999", &miblen), ENOENT, "");
176 assert_zu_eq(miblen, 4, "");
177
178 /* Valid case. */
179 assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "nregs", &miblen), 0, "");
180 assert_zu_eq(miblen, 4, "");
181 assert_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
182 0, "Unexpected mallctlbymib() failure");
183 assert_d_eq(mallctl("arenas.bin.0.nregs", &result_ref, &len_result,
184 NULL, 0), 0, "Unexpected mallctl() failure");
185 expect_zu_eq(result, result_ref,
186 "mallctlbymib() and mallctl() returned different result");
187}
188TEST_END
189
190TEST_BEGIN(test_mallctlbymibname) {
191 size_t mib[4];
192 size_t miblen = 4;
193 uint32_t result, result_ref;
194 size_t len_result = sizeof(uint32_t);
195
196 tsd_t *tsd = tsd_fetch();
197
198 /* Error cases. */
199
200 assert_d_eq(mallctlnametomib("arenas", mib, &miblen), 0,
201 "Unexpected mallctlnametomib() failure");
202 assert_zu_eq(miblen, 1, "");
203
204 miblen = 4;
205 assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0", &miblen,
206 &result, &len_result, NULL, 0), ENOENT, "");
207 miblen = 4;
208 assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0.bob", &miblen,
209 &result, &len_result, NULL, 0), ENOENT, "");
210 assert_zu_eq(miblen, 4, "");
211
212 /* Valid cases. */
213
214 assert_d_eq(mallctl("arenas.bin.0.nregs", &result_ref, &len_result,
215 NULL, 0), 0, "Unexpected mallctl() failure");
216 miblen = 4;
217
218 assert_d_eq(ctl_bymibname(tsd, mib, 0, "arenas.bin.0.nregs", &miblen,
219 &result, &len_result, NULL, 0), 0, "");
220 assert_zu_eq(miblen, 4, "");
221 expect_zu_eq(result, result_ref, "Unexpected result");
222
223 assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0.nregs", &miblen, &result,
224 &len_result, NULL, 0), 0, "");
225 assert_zu_eq(miblen, 4, "");
226 expect_zu_eq(result, result_ref, "Unexpected result");
227
228 assert_d_eq(ctl_bymibname(tsd, mib, 2, "0.nregs", &miblen, &result,
229 &len_result, NULL, 0), 0, "");
230 assert_zu_eq(miblen, 4, "");
231 expect_zu_eq(result, result_ref, "Unexpected result");
232
233 assert_d_eq(ctl_bymibname(tsd, mib, 3, "nregs", &miblen, &result,
234 &len_result, NULL, 0), 0, "");
235 assert_zu_eq(miblen, 4, "");
236 expect_zu_eq(result, result_ref, "Unexpected result");
237}
238TEST_END
239
240TEST_BEGIN(test_mallctl_config) {
241#define TEST_MALLCTL_CONFIG(config, t) do { \
242 t oldval; \
243 size_t sz = sizeof(oldval); \
244 expect_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \
245 NULL, 0), 0, "Unexpected mallctl() failure"); \
246 expect_b_eq(oldval, config_##config, "Incorrect config value"); \
247 expect_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
248} while (0)
249
250 TEST_MALLCTL_CONFIG(cache_oblivious, bool);
251 TEST_MALLCTL_CONFIG(debug, bool);
252 TEST_MALLCTL_CONFIG(fill, bool);
253 TEST_MALLCTL_CONFIG(lazy_lock, bool);
254 TEST_MALLCTL_CONFIG(malloc_conf, const char *);
255 TEST_MALLCTL_CONFIG(prof, bool);
256 TEST_MALLCTL_CONFIG(prof_libgcc, bool);
257 TEST_MALLCTL_CONFIG(prof_libunwind, bool);
258 TEST_MALLCTL_CONFIG(stats, bool);
259 TEST_MALLCTL_CONFIG(utrace, bool);
260 TEST_MALLCTL_CONFIG(xmalloc, bool);
261
262#undef TEST_MALLCTL_CONFIG
263}
264TEST_END
265
266TEST_BEGIN(test_mallctl_opt) {
267 bool config_always = true;
268
269#define TEST_MALLCTL_OPT(t, opt, config) do { \
270 t oldval; \
271 size_t sz = sizeof(oldval); \
272 int expected = config_##config ? 0 : ENOENT; \
273 int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \
274 0); \
275 expect_d_eq(result, expected, \
276 "Unexpected mallctl() result for opt."#opt); \
277 expect_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
278} while (0)
279
280 TEST_MALLCTL_OPT(bool, abort, always);
281 TEST_MALLCTL_OPT(bool, abort_conf, always);
282 TEST_MALLCTL_OPT(bool, cache_oblivious, always);
283 TEST_MALLCTL_OPT(bool, trust_madvise, always);
284 TEST_MALLCTL_OPT(bool, confirm_conf, always);
285 TEST_MALLCTL_OPT(const char *, metadata_thp, always);
286 TEST_MALLCTL_OPT(bool, retain, always);
287 TEST_MALLCTL_OPT(const char *, dss, always);
288 TEST_MALLCTL_OPT(bool, hpa, always);
289 TEST_MALLCTL_OPT(size_t, hpa_slab_max_alloc, always);
290 TEST_MALLCTL_OPT(size_t, hpa_sec_nshards, always);
291 TEST_MALLCTL_OPT(size_t, hpa_sec_max_alloc, always);
292 TEST_MALLCTL_OPT(size_t, hpa_sec_max_bytes, always);
293 TEST_MALLCTL_OPT(size_t, hpa_sec_bytes_after_flush, always);
294 TEST_MALLCTL_OPT(size_t, hpa_sec_batch_fill_extra, always);
295 TEST_MALLCTL_OPT(unsigned, narenas, always);
296 TEST_MALLCTL_OPT(const char *, percpu_arena, always);
297 TEST_MALLCTL_OPT(size_t, oversize_threshold, always);
298 TEST_MALLCTL_OPT(bool, background_thread, always);
299 TEST_MALLCTL_OPT(ssize_t, dirty_decay_ms, always);
300 TEST_MALLCTL_OPT(ssize_t, muzzy_decay_ms, always);
301 TEST_MALLCTL_OPT(bool, stats_print, always);
302 TEST_MALLCTL_OPT(const char *, stats_print_opts, always);
303 TEST_MALLCTL_OPT(int64_t, stats_interval, always);
304 TEST_MALLCTL_OPT(const char *, stats_interval_opts, always);
305 TEST_MALLCTL_OPT(const char *, junk, fill);
306 TEST_MALLCTL_OPT(bool, zero, fill);
307 TEST_MALLCTL_OPT(bool, utrace, utrace);
308 TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
309 TEST_MALLCTL_OPT(bool, tcache, always);
310 TEST_MALLCTL_OPT(size_t, lg_extent_max_active_fit, always);
311 TEST_MALLCTL_OPT(size_t, tcache_max, always);
312 TEST_MALLCTL_OPT(const char *, thp, always);
313 TEST_MALLCTL_OPT(const char *, zero_realloc, always);
314 TEST_MALLCTL_OPT(bool, prof, prof);
315 TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
316 TEST_MALLCTL_OPT(bool, prof_active, prof);
317 TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof);
318 TEST_MALLCTL_OPT(bool, prof_accum, prof);
319 TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof);
320 TEST_MALLCTL_OPT(bool, prof_gdump, prof);
321 TEST_MALLCTL_OPT(bool, prof_final, prof);
322 TEST_MALLCTL_OPT(bool, prof_leak, prof);
323 TEST_MALLCTL_OPT(bool, prof_leak_error, prof);
324 TEST_MALLCTL_OPT(ssize_t, prof_recent_alloc_max, prof);
325 TEST_MALLCTL_OPT(bool, prof_stats, prof);
326 TEST_MALLCTL_OPT(bool, prof_sys_thread_name, prof);
327 TEST_MALLCTL_OPT(ssize_t, lg_san_uaf_align, uaf_detection);
328
329#undef TEST_MALLCTL_OPT
330}
331TEST_END
332
333TEST_BEGIN(test_manpage_example) {
334 unsigned nbins, i;
335 size_t mib[4];
336 size_t len, miblen;
337
338 len = sizeof(nbins);
339 expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
340 "Unexpected mallctl() failure");
341
342 miblen = 4;
343 expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0,
344 "Unexpected mallctlnametomib() failure");
345 for (i = 0; i < nbins; i++) {
346 size_t bin_size;
347
348 mib[2] = i;
349 len = sizeof(bin_size);
350 expect_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len,
351 NULL, 0), 0, "Unexpected mallctlbymib() failure");
352 /* Do something with bin_size... */
353 }
354}
355TEST_END
356
357TEST_BEGIN(test_tcache_none) {
358 test_skip_if(!opt_tcache);
359
360 /* Allocate p and q. */
361 void *p0 = mallocx(42, 0);
362 expect_ptr_not_null(p0, "Unexpected mallocx() failure");
363 void *q = mallocx(42, 0);
364 expect_ptr_not_null(q, "Unexpected mallocx() failure");
365
366 /* Deallocate p and q, but bypass the tcache for q. */
367 dallocx(p0, 0);
368 dallocx(q, MALLOCX_TCACHE_NONE);
369
370 /* Make sure that tcache-based allocation returns p, not q. */
371 void *p1 = mallocx(42, 0);
372 expect_ptr_not_null(p1, "Unexpected mallocx() failure");
373 if (!opt_prof && !san_uaf_detection_enabled()) {
374 expect_ptr_eq(p0, p1,
375 "Expected tcache to allocate cached region");
376 }
377
378 /* Clean up. */
379 dallocx(p1, MALLOCX_TCACHE_NONE);
380}
381TEST_END
382
383TEST_BEGIN(test_tcache) {
384#define NTCACHES 10
385 unsigned tis[NTCACHES];
386 void *ps[NTCACHES];
387 void *qs[NTCACHES];
388 unsigned i;
389 size_t sz, psz, qsz;
390
391 psz = 42;
392 qsz = nallocx(psz, 0) + 1;
393
394 /* Create tcaches. */
395 for (i = 0; i < NTCACHES; i++) {
396 sz = sizeof(unsigned);
397 expect_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
398 0), 0, "Unexpected mallctl() failure, i=%u", i);
399 }
400
401 /* Exercise tcache ID recycling. */
402 for (i = 0; i < NTCACHES; i++) {
403 expect_d_eq(mallctl("tcache.destroy", NULL, NULL,
404 (void *)&tis[i], sizeof(unsigned)), 0,
405 "Unexpected mallctl() failure, i=%u", i);
406 }
407 for (i = 0; i < NTCACHES; i++) {
408 sz = sizeof(unsigned);
409 expect_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
410 0), 0, "Unexpected mallctl() failure, i=%u", i);
411 }
412
413 /* Flush empty tcaches. */
414 for (i = 0; i < NTCACHES; i++) {
415 expect_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
416 sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
417 i);
418 }
419
420 /* Cache some allocations. */
421 for (i = 0; i < NTCACHES; i++) {
422 ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
423 expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
424 i);
425 dallocx(ps[i], MALLOCX_TCACHE(tis[i]));
426
427 qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i]));
428 expect_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u",
429 i);
430 dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
431 }
432
433 /* Verify that tcaches allocate cached regions. */
434 for (i = 0; i < NTCACHES; i++) {
435 void *p0 = ps[i];
436 ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
437 expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
438 i);
439 if (!san_uaf_detection_enabled()) {
440 expect_ptr_eq(ps[i], p0, "Expected mallocx() to "
441 "allocate cached region, i=%u", i);
442 }
443 }
444
445 /* Verify that reallocation uses cached regions. */
446 for (i = 0; i < NTCACHES; i++) {
447 void *q0 = qs[i];
448 qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i]));
449 expect_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
450 i);
451 if (!san_uaf_detection_enabled()) {
452 expect_ptr_eq(qs[i], q0, "Expected rallocx() to "
453 "allocate cached region, i=%u", i);
454 }
455 /* Avoid undefined behavior in case of test failure. */
456 if (qs[i] == NULL) {
457 qs[i] = ps[i];
458 }
459 }
460 for (i = 0; i < NTCACHES; i++) {
461 dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
462 }
463
464 /* Flush some non-empty tcaches. */
465 for (i = 0; i < NTCACHES/2; i++) {
466 expect_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
467 sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
468 i);
469 }
470
471 /* Destroy tcaches. */
472 for (i = 0; i < NTCACHES; i++) {
473 expect_d_eq(mallctl("tcache.destroy", NULL, NULL,
474 (void *)&tis[i], sizeof(unsigned)), 0,
475 "Unexpected mallctl() failure, i=%u", i);
476 }
477}
478TEST_END
479
480TEST_BEGIN(test_thread_arena) {
481 unsigned old_arena_ind, new_arena_ind, narenas;
482
483 const char *opa;
484 size_t sz = sizeof(opa);
485 expect_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0,
486 "Unexpected mallctl() failure");
487
488 sz = sizeof(unsigned);
489 expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
490 0, "Unexpected mallctl() failure");
491 if (opt_oversize_threshold != 0) {
492 narenas--;
493 }
494 expect_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
495
496 if (strcmp(opa, "disabled") == 0) {
497 new_arena_ind = narenas - 1;
498 expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
499 (void *)&new_arena_ind, sizeof(unsigned)), 0,
500 "Unexpected mallctl() failure");
501 new_arena_ind = 0;
502 expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
503 (void *)&new_arena_ind, sizeof(unsigned)), 0,
504 "Unexpected mallctl() failure");
505 } else {
506 expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
507 NULL, 0), 0, "Unexpected mallctl() failure");
508 new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1;
509 if (old_arena_ind != new_arena_ind) {
510 expect_d_eq(mallctl("thread.arena",
511 (void *)&old_arena_ind, &sz, (void *)&new_arena_ind,
512 sizeof(unsigned)), EPERM, "thread.arena ctl "
513 "should not be allowed with percpu arena");
514 }
515 }
516}
517TEST_END
518
519TEST_BEGIN(test_arena_i_initialized) {
520 unsigned narenas, i;
521 size_t sz;
522 size_t mib[3];
523 size_t miblen = sizeof(mib) / sizeof(size_t);
524 bool initialized;
525
526 sz = sizeof(narenas);
527 expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
528 0, "Unexpected mallctl() failure");
529
530 expect_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
531 "Unexpected mallctlnametomib() failure");
532 for (i = 0; i < narenas; i++) {
533 mib[1] = i;
534 sz = sizeof(initialized);
535 expect_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL,
536 0), 0, "Unexpected mallctl() failure");
537 }
538
539 mib[1] = MALLCTL_ARENAS_ALL;
540 sz = sizeof(initialized);
541 expect_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0,
542 "Unexpected mallctl() failure");
543 expect_true(initialized,
544 "Merged arena statistics should always be initialized");
545
546 /* Equivalent to the above but using mallctl() directly. */
547 sz = sizeof(initialized);
548 expect_d_eq(mallctl(
549 "arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized",
550 (void *)&initialized, &sz, NULL, 0), 0,
551 "Unexpected mallctl() failure");
552 expect_true(initialized,
553 "Merged arena statistics should always be initialized");
554}
555TEST_END
556
557TEST_BEGIN(test_arena_i_dirty_decay_ms) {
558 ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
559 size_t sz = sizeof(ssize_t);
560
561 expect_d_eq(mallctl("arena.0.dirty_decay_ms",
562 (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
563 "Unexpected mallctl() failure");
564
565 dirty_decay_ms = -2;
566 expect_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
567 (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
568 "Unexpected mallctl() success");
569
570 dirty_decay_ms = 0x7fffffff;
571 expect_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
572 (void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
573 "Unexpected mallctl() failure");
574
575 for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1;
576 dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms,
577 dirty_decay_ms++) {
578 ssize_t old_dirty_decay_ms;
579
580 expect_d_eq(mallctl("arena.0.dirty_decay_ms",
581 (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
582 sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
583 expect_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
584 "Unexpected old arena.0.dirty_decay_ms");
585 }
586}
587TEST_END
588
589TEST_BEGIN(test_arena_i_muzzy_decay_ms) {
590 ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
591 size_t sz = sizeof(ssize_t);
592
593 expect_d_eq(mallctl("arena.0.muzzy_decay_ms",
594 (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
595 "Unexpected mallctl() failure");
596
597 muzzy_decay_ms = -2;
598 expect_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
599 (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
600 "Unexpected mallctl() success");
601
602 muzzy_decay_ms = 0x7fffffff;
603 expect_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
604 (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
605 "Unexpected mallctl() failure");
606
607 for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1;
608 muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms,
609 muzzy_decay_ms++) {
610 ssize_t old_muzzy_decay_ms;
611
612 expect_d_eq(mallctl("arena.0.muzzy_decay_ms",
613 (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
614 sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
615 expect_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
616 "Unexpected old arena.0.muzzy_decay_ms");
617 }
618}
619TEST_END
620
621TEST_BEGIN(test_arena_i_purge) {
622 unsigned narenas;
623 size_t sz = sizeof(unsigned);
624 size_t mib[3];
625 size_t miblen = 3;
626
627 expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
628 "Unexpected mallctl() failure");
629
630 expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
631 0, "Unexpected mallctl() failure");
632 expect_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
633 "Unexpected mallctlnametomib() failure");
634 mib[1] = narenas;
635 expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
636 "Unexpected mallctlbymib() failure");
637
638 mib[1] = MALLCTL_ARENAS_ALL;
639 expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
640 "Unexpected mallctlbymib() failure");
641}
642TEST_END
643
644TEST_BEGIN(test_arena_i_decay) {
645 unsigned narenas;
646 size_t sz = sizeof(unsigned);
647 size_t mib[3];
648 size_t miblen = 3;
649
650 expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
651 "Unexpected mallctl() failure");
652
653 expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
654 0, "Unexpected mallctl() failure");
655 expect_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
656 "Unexpected mallctlnametomib() failure");
657 mib[1] = narenas;
658 expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
659 "Unexpected mallctlbymib() failure");
660
661 mib[1] = MALLCTL_ARENAS_ALL;
662 expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
663 "Unexpected mallctlbymib() failure");
664}
665TEST_END
666
667TEST_BEGIN(test_arena_i_dss) {
668 const char *dss_prec_old, *dss_prec_new;
669 size_t sz = sizeof(dss_prec_old);
670 size_t mib[3];
671 size_t miblen;
672
673 miblen = sizeof(mib)/sizeof(size_t);
674 expect_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
675 "Unexpected mallctlnametomib() error");
676
677 dss_prec_new = "disabled";
678 expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
679 (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
680 "Unexpected mallctl() failure");
681 expect_str_ne(dss_prec_old, "primary",
682 "Unexpected default for dss precedence");
683
684 expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
685 (void *)&dss_prec_old, sizeof(dss_prec_old)), 0,
686 "Unexpected mallctl() failure");
687
688 expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
689 0), 0, "Unexpected mallctl() failure");
690 expect_str_ne(dss_prec_old, "primary",
691 "Unexpected value for dss precedence");
692
693 mib[1] = narenas_total_get();
694 dss_prec_new = "disabled";
695 expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
696 (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
697 "Unexpected mallctl() failure");
698 expect_str_ne(dss_prec_old, "primary",
699 "Unexpected default for dss precedence");
700
701 expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
702 (void *)&dss_prec_old, sizeof(dss_prec_new)), 0,
703 "Unexpected mallctl() failure");
704
705 expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
706 0), 0, "Unexpected mallctl() failure");
707 expect_str_ne(dss_prec_old, "primary",
708 "Unexpected value for dss precedence");
709}
710TEST_END
711
712TEST_BEGIN(test_arena_i_retain_grow_limit) {
713 size_t old_limit, new_limit, default_limit;
714 size_t mib[3];
715 size_t miblen;
716
717 bool retain_enabled;
718 size_t sz = sizeof(retain_enabled);
719 expect_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0),
720 0, "Unexpected mallctl() failure");
721 test_skip_if(!retain_enabled);
722
723 sz = sizeof(default_limit);
724 miblen = sizeof(mib)/sizeof(size_t);
725 expect_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen),
726 0, "Unexpected mallctlnametomib() error");
727
728 expect_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0,
729 "Unexpected mallctl() failure");
730 expect_zu_eq(default_limit, SC_LARGE_MAXCLASS,
731 "Unexpected default for retain_grow_limit");
732
733 new_limit = PAGE - 1;
734 expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
735 sizeof(new_limit)), EFAULT, "Unexpected mallctl() success");
736
737 new_limit = PAGE + 1;
738 expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
739 sizeof(new_limit)), 0, "Unexpected mallctl() failure");
740 expect_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
741 "Unexpected mallctl() failure");
742 expect_zu_eq(old_limit, PAGE,
743 "Unexpected value for retain_grow_limit");
744
745 /* Expect grow less than psize class 10. */
746 new_limit = sz_pind2sz(10) - 1;
747 expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
748 sizeof(new_limit)), 0, "Unexpected mallctl() failure");
749 expect_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
750 "Unexpected mallctl() failure");
751 expect_zu_eq(old_limit, sz_pind2sz(9),
752 "Unexpected value for retain_grow_limit");
753
754 /* Restore to default. */
755 expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit,
756 sizeof(default_limit)), 0, "Unexpected mallctl() failure");
757}
758TEST_END
759
760TEST_BEGIN(test_arenas_dirty_decay_ms) {
761 ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
762 size_t sz = sizeof(ssize_t);
763
764 expect_d_eq(mallctl("arenas.dirty_decay_ms",
765 (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
766 "Unexpected mallctl() failure");
767
768 dirty_decay_ms = -2;
769 expect_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
770 (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
771 "Unexpected mallctl() success");
772
773 dirty_decay_ms = 0x7fffffff;
774 expect_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
775 (void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
776 "Expected mallctl() failure");
777
778 for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1;
779 dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms,
780 dirty_decay_ms++) {
781 ssize_t old_dirty_decay_ms;
782
783 expect_d_eq(mallctl("arenas.dirty_decay_ms",
784 (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
785 sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
786 expect_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
787 "Unexpected old arenas.dirty_decay_ms");
788 }
789}
790TEST_END
791
792TEST_BEGIN(test_arenas_muzzy_decay_ms) {
793 ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
794 size_t sz = sizeof(ssize_t);
795
796 expect_d_eq(mallctl("arenas.muzzy_decay_ms",
797 (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
798 "Unexpected mallctl() failure");
799
800 muzzy_decay_ms = -2;
801 expect_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
802 (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
803 "Unexpected mallctl() success");
804
805 muzzy_decay_ms = 0x7fffffff;
806 expect_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
807 (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
808 "Expected mallctl() failure");
809
810 for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1;
811 muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms,
812 muzzy_decay_ms++) {
813 ssize_t old_muzzy_decay_ms;
814
815 expect_d_eq(mallctl("arenas.muzzy_decay_ms",
816 (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
817 sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
818 expect_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
819 "Unexpected old arenas.muzzy_decay_ms");
820 }
821}
822TEST_END
823
824TEST_BEGIN(test_arenas_constants) {
825#define TEST_ARENAS_CONSTANT(t, name, expected) do { \
826 t name; \
827 size_t sz = sizeof(t); \
828 expect_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \
829 0), 0, "Unexpected mallctl() failure"); \
830 expect_zu_eq(name, expected, "Incorrect "#name" size"); \
831} while (0)
832
833 TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
834 TEST_ARENAS_CONSTANT(size_t, page, PAGE);
835 TEST_ARENAS_CONSTANT(unsigned, nbins, SC_NBINS);
836 TEST_ARENAS_CONSTANT(unsigned, nlextents, SC_NSIZES - SC_NBINS);
837
838#undef TEST_ARENAS_CONSTANT
839}
840TEST_END
841
842TEST_BEGIN(test_arenas_bin_constants) {
843#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \
844 t name; \
845 size_t sz = sizeof(t); \
846 expect_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \
847 NULL, 0), 0, "Unexpected mallctl() failure"); \
848 expect_zu_eq(name, expected, "Incorrect "#name" size"); \
849} while (0)
850
851 TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size);
852 TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, bin_infos[0].nregs);
853 TEST_ARENAS_BIN_CONSTANT(size_t, slab_size,
854 bin_infos[0].slab_size);
855 TEST_ARENAS_BIN_CONSTANT(uint32_t, nshards, bin_infos[0].n_shards);
856
857#undef TEST_ARENAS_BIN_CONSTANT
858}
859TEST_END
860
861TEST_BEGIN(test_arenas_lextent_constants) {
862#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do { \
863 t name; \
864 size_t sz = sizeof(t); \
865 expect_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name, \
866 &sz, NULL, 0), 0, "Unexpected mallctl() failure"); \
867 expect_zu_eq(name, expected, "Incorrect "#name" size"); \
868} while (0)
869
870 TEST_ARENAS_LEXTENT_CONSTANT(size_t, size,
871 SC_LARGE_MINCLASS);
872
873#undef TEST_ARENAS_LEXTENT_CONSTANT
874}
875TEST_END
876
877TEST_BEGIN(test_arenas_create) {
878 unsigned narenas_before, arena, narenas_after;
879 size_t sz = sizeof(unsigned);
880
881 expect_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz,
882 NULL, 0), 0, "Unexpected mallctl() failure");
883 expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
884 "Unexpected mallctl() failure");
885 expect_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL,
886 0), 0, "Unexpected mallctl() failure");
887
888 expect_u_eq(narenas_before+1, narenas_after,
889 "Unexpected number of arenas before versus after extension");
890 expect_u_eq(arena, narenas_after-1, "Unexpected arena index");
891}
892TEST_END
893
894TEST_BEGIN(test_arenas_lookup) {
895 unsigned arena, arena1;
896 void *ptr;
897 size_t sz = sizeof(unsigned);
898
899 expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
900 "Unexpected mallctl() failure");
901 ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
902 expect_ptr_not_null(ptr, "Unexpected mallocx() failure");
903 expect_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
904 0, "Unexpected mallctl() failure");
905 expect_u_eq(arena, arena1, "Unexpected arena index");
906 dallocx(ptr, 0);
907}
908TEST_END
909
910TEST_BEGIN(test_prof_active) {
911 /*
912 * If config_prof is off, then the test for prof_active in
913 * test_mallctl_opt was already enough.
914 */
915 test_skip_if(!config_prof);
916 test_skip_if(opt_prof);
917
918 bool active, old;
919 size_t len = sizeof(bool);
920
921 active = true;
922 expect_d_eq(mallctl("prof.active", NULL, NULL, &active, len), ENOENT,
923 "Setting prof_active to true should fail when opt_prof is off");
924 old = true;
925 expect_d_eq(mallctl("prof.active", &old, &len, &active, len), ENOENT,
926 "Setting prof_active to true should fail when opt_prof is off");
927 expect_true(old, "old value should not be touched when mallctl fails");
928 active = false;
929 expect_d_eq(mallctl("prof.active", NULL, NULL, &active, len), 0,
930 "Setting prof_active to false should succeed when opt_prof is off");
931 expect_d_eq(mallctl("prof.active", &old, &len, &active, len), 0,
932 "Setting prof_active to false should succeed when opt_prof is off");
933 expect_false(old, "prof_active should be false when opt_prof is off");
934}
935TEST_END
936
937TEST_BEGIN(test_stats_arenas) {
938#define TEST_STATS_ARENAS(t, name) do { \
939 t name; \
940 size_t sz = sizeof(t); \
941 expect_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \
942 NULL, 0), 0, "Unexpected mallctl() failure"); \
943} while (0)
944
945 TEST_STATS_ARENAS(unsigned, nthreads);
946 TEST_STATS_ARENAS(const char *, dss);
947 TEST_STATS_ARENAS(ssize_t, dirty_decay_ms);
948 TEST_STATS_ARENAS(ssize_t, muzzy_decay_ms);
949 TEST_STATS_ARENAS(size_t, pactive);
950 TEST_STATS_ARENAS(size_t, pdirty);
951
952#undef TEST_STATS_ARENAS
953}
954TEST_END
955
956static void
957alloc_hook(void *extra, UNUSED hook_alloc_t type, UNUSED void *result,
958 UNUSED uintptr_t result_raw, UNUSED uintptr_t args_raw[3]) {
959 *(bool *)extra = true;
960}
961
962static void
963dalloc_hook(void *extra, UNUSED hook_dalloc_t type,
964 UNUSED void *address, UNUSED uintptr_t args_raw[3]) {
965 *(bool *)extra = true;
966}
967
968TEST_BEGIN(test_hooks) {
969 bool hook_called = false;
970 hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
971 void *handle = NULL;
972 size_t sz = sizeof(handle);
973 int err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
974 sizeof(hooks));
975 expect_d_eq(err, 0, "Hook installation failed");
976 expect_ptr_ne(handle, NULL, "Hook installation gave null handle");
977 void *ptr = mallocx(1, 0);
978 expect_true(hook_called, "Alloc hook not called");
979 hook_called = false;
980 free(ptr);
981 expect_true(hook_called, "Free hook not called");
982
983 err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
984 sizeof(handle));
985 expect_d_eq(err, 0, "Hook removal failed");
986 hook_called = false;
987 ptr = mallocx(1, 0);
988 free(ptr);
989 expect_false(hook_called, "Hook called after removal");
990}
991TEST_END
992
993TEST_BEGIN(test_hooks_exhaustion) {
994 bool hook_called = false;
995 hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
996
997 void *handle;
998 void *handles[HOOK_MAX];
999 size_t sz = sizeof(handle);
1000 int err;
1001 for (int i = 0; i < HOOK_MAX; i++) {
1002 handle = NULL;
1003 err = mallctl("experimental.hooks.install", &handle, &sz,
1004 &hooks, sizeof(hooks));
1005 expect_d_eq(err, 0, "Error installation hooks");
1006 expect_ptr_ne(handle, NULL, "Got NULL handle");
1007 handles[i] = handle;
1008 }
1009 err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
1010 sizeof(hooks));
1011 expect_d_eq(err, EAGAIN, "Should have failed hook installation");
1012 for (int i = 0; i < HOOK_MAX; i++) {
1013 err = mallctl("experimental.hooks.remove", NULL, NULL,
1014 &handles[i], sizeof(handles[i]));
1015 expect_d_eq(err, 0, "Hook removal failed");
1016 }
1017 /* Insertion failed, but then we removed some; it should work now. */
1018 handle = NULL;
1019 err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
1020 sizeof(hooks));
1021 expect_d_eq(err, 0, "Hook insertion failed");
1022 expect_ptr_ne(handle, NULL, "Got NULL handle");
1023 err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
1024 sizeof(handle));
1025 expect_d_eq(err, 0, "Hook removal failed");
1026}
1027TEST_END
1028
1029TEST_BEGIN(test_thread_idle) {
1030 /*
1031 * We're cheating a little bit in this test, and inferring things about
1032 * implementation internals (like tcache details). We have to;
1033 * thread.idle has no guaranteed effects. We need stats to make these
1034 * inferences.
1035 */
1036 test_skip_if(!config_stats);
1037
1038 int err;
1039 size_t sz;
1040 size_t miblen;
1041
1042 bool tcache_enabled = false;
1043 sz = sizeof(tcache_enabled);
1044 err = mallctl("thread.tcache.enabled", &tcache_enabled, &sz, NULL, 0);
1045 expect_d_eq(err, 0, "");
1046 test_skip_if(!tcache_enabled);
1047
1048 size_t tcache_max;
1049 sz = sizeof(tcache_max);
1050 err = mallctl("arenas.tcache_max", &tcache_max, &sz, NULL, 0);
1051 expect_d_eq(err, 0, "");
1052 test_skip_if(tcache_max == 0);
1053
1054 unsigned arena_ind;
1055 sz = sizeof(arena_ind);
1056 err = mallctl("thread.arena", &arena_ind, &sz, NULL, 0);
1057 expect_d_eq(err, 0, "");
1058
1059 /* We're going to do an allocation of size 1, which we know is small. */
1060 size_t mib[5];
1061 miblen = sizeof(mib)/sizeof(mib[0]);
1062 err = mallctlnametomib("stats.arenas.0.small.ndalloc", mib, &miblen);
1063 expect_d_eq(err, 0, "");
1064 mib[2] = arena_ind;
1065
1066 /*
1067 * This alloc and dalloc should leave something in the tcache, in a
1068 * small size's cache bin.
1069 */
1070 void *ptr = mallocx(1, 0);
1071 dallocx(ptr, 0);
1072
1073 uint64_t epoch;
1074 err = mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
1075 expect_d_eq(err, 0, "");
1076
1077 uint64_t small_dalloc_pre_idle;
1078 sz = sizeof(small_dalloc_pre_idle);
1079 err = mallctlbymib(mib, miblen, &small_dalloc_pre_idle, &sz, NULL, 0);
1080 expect_d_eq(err, 0, "");
1081
1082 err = mallctl("thread.idle", NULL, NULL, NULL, 0);
1083 expect_d_eq(err, 0, "");
1084
1085 err = mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
1086 expect_d_eq(err, 0, "");
1087
1088 uint64_t small_dalloc_post_idle;
1089 sz = sizeof(small_dalloc_post_idle);
1090 err = mallctlbymib(mib, miblen, &small_dalloc_post_idle, &sz, NULL, 0);
1091 expect_d_eq(err, 0, "");
1092
1093 expect_u64_lt(small_dalloc_pre_idle, small_dalloc_post_idle,
1094 "Purge didn't flush the tcache");
1095}
1096TEST_END
1097
1098TEST_BEGIN(test_thread_peak) {
1099 test_skip_if(!config_stats);
1100
1101 /*
1102 * We don't commit to any stable amount of accuracy for peak tracking
1103 * (in practice, when this test was written, we made sure to be within
1104 * 100k). But 10MB is big for more or less any definition of big.
1105 */
1106 size_t big_size = 10 * 1024 * 1024;
1107 size_t small_size = 256;
1108
1109 void *ptr;
1110 int err;
1111 size_t sz;
1112 uint64_t peak;
1113 sz = sizeof(uint64_t);
1114
1115 err = mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
1116 expect_d_eq(err, 0, "");
1117 ptr = mallocx(SC_SMALL_MAXCLASS, 0);
1118 err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
1119 expect_d_eq(err, 0, "");
1120 expect_u64_eq(peak, SC_SMALL_MAXCLASS, "Missed an update");
1121 free(ptr);
1122 err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
1123 expect_d_eq(err, 0, "");
1124 expect_u64_eq(peak, SC_SMALL_MAXCLASS, "Freeing changed peak");
1125 ptr = mallocx(big_size, 0);
1126 free(ptr);
1127 /*
1128 * The peak should have hit big_size in the last two lines, even though
1129 * the net allocated bytes has since dropped back down to zero. We
1130 * should have noticed the peak change without having down any mallctl
1131 * calls while net allocated bytes was high.
1132 */
1133 err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
1134 expect_d_eq(err, 0, "");
1135 expect_u64_ge(peak, big_size, "Missed a peak change.");
1136
1137 /* Allocate big_size, but using small allocations. */
1138 size_t nallocs = big_size / small_size;
1139 void **ptrs = calloc(nallocs, sizeof(void *));
1140 err = mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
1141 expect_d_eq(err, 0, "");
1142 err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
1143 expect_d_eq(err, 0, "");
1144 expect_u64_eq(0, peak, "Missed a reset.");
1145 for (size_t i = 0; i < nallocs; i++) {
1146 ptrs[i] = mallocx(small_size, 0);
1147 }
1148 for (size_t i = 0; i < nallocs; i++) {
1149 free(ptrs[i]);
1150 }
1151 err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
1152 expect_d_eq(err, 0, "");
1153 /*
1154 * We don't guarantee exactness; make sure we're within 10% of the peak,
1155 * though.
1156 */
1157 expect_u64_ge(peak, nallocx(small_size, 0) * nallocs * 9 / 10,
1158 "Missed some peak changes.");
1159 expect_u64_le(peak, nallocx(small_size, 0) * nallocs * 11 / 10,
1160 "Overcounted peak changes.");
1161 free(ptrs);
1162}
1163TEST_END
1164
1165typedef struct activity_test_data_s activity_test_data_t;
1166struct activity_test_data_s {
1167 uint64_t obtained_alloc;
1168 uint64_t obtained_dalloc;
1169};
1170
1171static void
1172activity_test_callback(void *uctx, uint64_t alloc, uint64_t dalloc) {
1173 activity_test_data_t *test_data = (activity_test_data_t *)uctx;
1174 test_data->obtained_alloc = alloc;
1175 test_data->obtained_dalloc = dalloc;
1176}
1177
1178TEST_BEGIN(test_thread_activity_callback) {
1179 test_skip_if(!config_stats);
1180
1181 const size_t big_size = 10 * 1024 * 1024;
1182 void *ptr;
1183 int err;
1184 size_t sz;
1185
1186 uint64_t *allocatedp;
1187 uint64_t *deallocatedp;
1188 sz = sizeof(allocatedp);
1189 err = mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0);
1190 assert_d_eq(0, err, "");
1191 err = mallctl("thread.deallocatedp", &deallocatedp, &sz, NULL, 0);
1192 assert_d_eq(0, err, "");
1193
1194 activity_callback_thunk_t old_thunk = {(activity_callback_t)111,
1195 (void *)222};
1196
1197 activity_test_data_t test_data = {333, 444};
1198 activity_callback_thunk_t new_thunk =
1199 {&activity_test_callback, &test_data};
1200
1201 sz = sizeof(old_thunk);
1202 err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
1203 &new_thunk, sizeof(new_thunk));
1204 assert_d_eq(0, err, "");
1205
1206 expect_true(old_thunk.callback == NULL, "Callback already installed");
1207 expect_true(old_thunk.uctx == NULL, "Callback data already installed");
1208
1209 ptr = mallocx(big_size, 0);
1210 expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
1211 expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");
1212
1213 free(ptr);
1214 expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
1215 expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");
1216
1217 sz = sizeof(old_thunk);
1218 new_thunk = (activity_callback_thunk_t){ NULL, NULL };
1219 err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
1220 &new_thunk, sizeof(new_thunk));
1221 assert_d_eq(0, err, "");
1222
1223 expect_true(old_thunk.callback == &activity_test_callback, "");
1224 expect_true(old_thunk.uctx == &test_data, "");
1225
1226 /* Inserting NULL should have turned off tracking. */
1227 test_data.obtained_alloc = 333;
1228 test_data.obtained_dalloc = 444;
1229 ptr = mallocx(big_size, 0);
1230 free(ptr);
1231 expect_u64_eq(333, test_data.obtained_alloc, "");
1232 expect_u64_eq(444, test_data.obtained_dalloc, "");
1233}
1234TEST_END
1235
1236int
1237main(void) {
1238 return test(
1239 test_mallctl_errors,
1240 test_mallctlnametomib_errors,
1241 test_mallctlbymib_errors,
1242 test_mallctl_read_write,
1243 test_mallctlnametomib_short_mib,
1244 test_mallctlnametomib_short_name,
1245 test_mallctlmibnametomib,
1246 test_mallctlbymibname,
1247 test_mallctl_config,
1248 test_mallctl_opt,
1249 test_manpage_example,
1250 test_tcache_none,
1251 test_tcache,
1252 test_thread_arena,
1253 test_arena_i_initialized,
1254 test_arena_i_dirty_decay_ms,
1255 test_arena_i_muzzy_decay_ms,
1256 test_arena_i_purge,
1257 test_arena_i_decay,
1258 test_arena_i_dss,
1259 test_arena_i_retain_grow_limit,
1260 test_arenas_dirty_decay_ms,
1261 test_arenas_muzzy_decay_ms,
1262 test_arenas_constants,
1263 test_arenas_bin_constants,
1264 test_arenas_lextent_constants,
1265 test_arenas_create,
1266 test_arenas_lookup,
1267 test_prof_active,
1268 test_stats_arenas,
1269 test_hooks,
1270 test_hooks_exhaustion,
1271 test_thread_idle,
1272 test_thread_peak,
1273 test_thread_activity_callback);
1274}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/malloc_conf_2.c b/examples/redis-unstable/deps/jemalloc/test/unit/malloc_conf_2.c
deleted file mode 100644
index ecfa499..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/malloc_conf_2.c
+++ /dev/null
@@ -1,29 +0,0 @@
1#include "test/jemalloc_test.h"
2
3const char *malloc_conf = "dirty_decay_ms:1000";
4const char *malloc_conf_2_conf_harder = "dirty_decay_ms:1234";
5
6TEST_BEGIN(test_malloc_conf_2) {
7#ifdef _WIN32
8 bool windows = true;
9#else
10 bool windows = false;
11#endif
12 /* Windows doesn't support weak symbol linker trickery. */
13 test_skip_if(windows);
14
15 ssize_t dirty_decay_ms;
16 size_t sz = sizeof(dirty_decay_ms);
17
18 int err = mallctl("opt.dirty_decay_ms", &dirty_decay_ms, &sz, NULL, 0);
19 assert_d_eq(err, 0, "Unexpected mallctl failure");
20 expect_zd_eq(dirty_decay_ms, 1234,
21 "malloc_conf_2 setting didn't take effect");
22}
23TEST_END
24
25int
26main(void) {
27 return test(
28 test_malloc_conf_2);
29}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/malloc_conf_2.sh b/examples/redis-unstable/deps/jemalloc/test/unit/malloc_conf_2.sh
deleted file mode 100644
index 2c780f1..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/malloc_conf_2.sh
+++ /dev/null
@@ -1 +0,0 @@
1export MALLOC_CONF="dirty_decay_ms:500"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/malloc_io.c b/examples/redis-unstable/deps/jemalloc/test/unit/malloc_io.c
deleted file mode 100644
index 385f745..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/malloc_io.c
+++ /dev/null
@@ -1,268 +0,0 @@
1#include "test/jemalloc_test.h"
2
3TEST_BEGIN(test_malloc_strtoumax_no_endptr) {
4 int err;
5
6 set_errno(0);
7 expect_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
8 err = get_errno();
9 expect_d_eq(err, 0, "Unexpected failure");
10}
11TEST_END
12
13TEST_BEGIN(test_malloc_strtoumax) {
14 struct test_s {
15 const char *input;
16 const char *expected_remainder;
17 int base;
18 int expected_errno;
19 const char *expected_errno_name;
20 uintmax_t expected_x;
21 };
22#define ERR(e) e, #e
23#define KUMAX(x) ((uintmax_t)x##ULL)
24#define KSMAX(x) ((uintmax_t)(intmax_t)x##LL)
25 struct test_s tests[] = {
26 {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX},
27 {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX},
28 {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX},
29
30 {"", "", 0, ERR(EINVAL), UINTMAX_MAX},
31 {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX},
32 {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX},
33 {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX},
34
35 {"42", "", 0, ERR(0), KUMAX(42)},
36 {"+42", "", 0, ERR(0), KUMAX(42)},
37 {"-42", "", 0, ERR(0), KSMAX(-42)},
38 {"042", "", 0, ERR(0), KUMAX(042)},
39 {"+042", "", 0, ERR(0), KUMAX(042)},
40 {"-042", "", 0, ERR(0), KSMAX(-042)},
41 {"0x42", "", 0, ERR(0), KUMAX(0x42)},
42 {"+0x42", "", 0, ERR(0), KUMAX(0x42)},
43 {"-0x42", "", 0, ERR(0), KSMAX(-0x42)},
44
45 {"0", "", 0, ERR(0), KUMAX(0)},
46 {"1", "", 0, ERR(0), KUMAX(1)},
47
48 {"42", "", 0, ERR(0), KUMAX(42)},
49 {" 42", "", 0, ERR(0), KUMAX(42)},
50 {"42 ", " ", 0, ERR(0), KUMAX(42)},
51 {"0x", "x", 0, ERR(0), KUMAX(0)},
52 {"42x", "x", 0, ERR(0), KUMAX(42)},
53
54 {"07", "", 0, ERR(0), KUMAX(7)},
55 {"010", "", 0, ERR(0), KUMAX(8)},
56 {"08", "8", 0, ERR(0), KUMAX(0)},
57 {"0_", "_", 0, ERR(0), KUMAX(0)},
58
59 {"0x", "x", 0, ERR(0), KUMAX(0)},
60 {"0X", "X", 0, ERR(0), KUMAX(0)},
61 {"0xg", "xg", 0, ERR(0), KUMAX(0)},
62 {"0XA", "", 0, ERR(0), KUMAX(10)},
63
64 {"010", "", 10, ERR(0), KUMAX(10)},
65 {"0x3", "x3", 10, ERR(0), KUMAX(0)},
66
67 {"12", "2", 2, ERR(0), KUMAX(1)},
68 {"78", "8", 8, ERR(0), KUMAX(7)},
69 {"9a", "a", 10, ERR(0), KUMAX(9)},
70 {"9A", "A", 10, ERR(0), KUMAX(9)},
71 {"fg", "g", 16, ERR(0), KUMAX(15)},
72 {"FG", "G", 16, ERR(0), KUMAX(15)},
73 {"0xfg", "g", 16, ERR(0), KUMAX(15)},
74 {"0XFG", "G", 16, ERR(0), KUMAX(15)},
75 {"z_", "_", 36, ERR(0), KUMAX(35)},
76 {"Z_", "_", 36, ERR(0), KUMAX(35)}
77 };
78#undef ERR
79#undef KUMAX
80#undef KSMAX
81 unsigned i;
82
83 for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
84 struct test_s *test = &tests[i];
85 int err;
86 uintmax_t result;
87 char *remainder;
88
89 set_errno(0);
90 result = malloc_strtoumax(test->input, &remainder, test->base);
91 err = get_errno();
92 expect_d_eq(err, test->expected_errno,
93 "Expected errno %s for \"%s\", base %d",
94 test->expected_errno_name, test->input, test->base);
95 expect_str_eq(remainder, test->expected_remainder,
96 "Unexpected remainder for \"%s\", base %d",
97 test->input, test->base);
98 if (err == 0) {
99 expect_ju_eq(result, test->expected_x,
100 "Unexpected result for \"%s\", base %d",
101 test->input, test->base);
102 }
103 }
104}
105TEST_END
106
107TEST_BEGIN(test_malloc_snprintf_truncated) {
108#define BUFLEN 15
109 char buf[BUFLEN];
110 size_t result;
111 size_t len;
112#define TEST(expected_str_untruncated, ...) do { \
113 result = malloc_snprintf(buf, len, __VA_ARGS__); \
114 expect_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
115 "Unexpected string inequality (\"%s\" vs \"%s\")", \
116 buf, expected_str_untruncated); \
117 expect_zu_eq(result, strlen(expected_str_untruncated), \
118 "Unexpected result"); \
119} while (0)
120
121 for (len = 1; len < BUFLEN; len++) {
122 TEST("012346789", "012346789");
123 TEST("a0123b", "a%sb", "0123");
124 TEST("a01234567", "a%s%s", "0123", "4567");
125 TEST("a0123 ", "a%-6s", "0123");
126 TEST("a 0123", "a%6s", "0123");
127 TEST("a 012", "a%6.3s", "0123");
128 TEST("a 012", "a%*.*s", 6, 3, "0123");
129 TEST("a 123b", "a% db", 123);
130 TEST("a123b", "a%-db", 123);
131 TEST("a-123b", "a%-db", -123);
132 TEST("a+123b", "a%+db", 123);
133 }
134#undef BUFLEN
135#undef TEST
136}
137TEST_END
138
139TEST_BEGIN(test_malloc_snprintf) {
140#define BUFLEN 128
141 char buf[BUFLEN];
142 size_t result;
143#define TEST(expected_str, ...) do { \
144 result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \
145 expect_str_eq(buf, expected_str, "Unexpected output"); \
146 expect_zu_eq(result, strlen(expected_str), "Unexpected result");\
147} while (0)
148
149 TEST("hello", "hello");
150
151 TEST("50%, 100%", "50%%, %d%%", 100);
152
153 TEST("a0123b", "a%sb", "0123");
154
155 TEST("a 0123b", "a%5sb", "0123");
156 TEST("a 0123b", "a%*sb", 5, "0123");
157
158 TEST("a0123 b", "a%-5sb", "0123");
159 TEST("a0123b", "a%*sb", -1, "0123");
160 TEST("a0123 b", "a%*sb", -5, "0123");
161 TEST("a0123 b", "a%-*sb", -5, "0123");
162
163 TEST("a012b", "a%.3sb", "0123");
164 TEST("a012b", "a%.*sb", 3, "0123");
165 TEST("a0123b", "a%.*sb", -3, "0123");
166
167 TEST("a 012b", "a%5.3sb", "0123");
168 TEST("a 012b", "a%5.*sb", 3, "0123");
169 TEST("a 012b", "a%*.3sb", 5, "0123");
170 TEST("a 012b", "a%*.*sb", 5, 3, "0123");
171 TEST("a 0123b", "a%*.*sb", 5, -3, "0123");
172
173 TEST("_abcd_", "_%x_", 0xabcd);
174 TEST("_0xabcd_", "_%#x_", 0xabcd);
175 TEST("_1234_", "_%o_", 01234);
176 TEST("_01234_", "_%#o_", 01234);
177 TEST("_1234_", "_%u_", 1234);
178 TEST("01234", "%05u", 1234);
179
180 TEST("_1234_", "_%d_", 1234);
181 TEST("_ 1234_", "_% d_", 1234);
182 TEST("_+1234_", "_%+d_", 1234);
183 TEST("_-1234_", "_%d_", -1234);
184 TEST("_-1234_", "_% d_", -1234);
185 TEST("_-1234_", "_%+d_", -1234);
186
187 /*
188 * Morally, we should test these too, but 0-padded signed types are not
189 * yet supported.
190 *
191 * TEST("01234", "%05", 1234);
192 * TEST("-1234", "%05d", -1234);
193 * TEST("-01234", "%06d", -1234);
194 */
195
196 TEST("_-1234_", "_%d_", -1234);
197 TEST("_1234_", "_%d_", 1234);
198 TEST("_-1234_", "_%i_", -1234);
199 TEST("_1234_", "_%i_", 1234);
200 TEST("_01234_", "_%#o_", 01234);
201 TEST("_1234_", "_%u_", 1234);
202 TEST("_0x1234abc_", "_%#x_", 0x1234abc);
203 TEST("_0X1234ABC_", "_%#X_", 0x1234abc);
204 TEST("_c_", "_%c_", 'c');
205 TEST("_string_", "_%s_", "string");
206 TEST("_0x42_", "_%p_", ((void *)0x42));
207
208 TEST("_-1234_", "_%ld_", ((long)-1234));
209 TEST("_1234_", "_%ld_", ((long)1234));
210 TEST("_-1234_", "_%li_", ((long)-1234));
211 TEST("_1234_", "_%li_", ((long)1234));
212 TEST("_01234_", "_%#lo_", ((long)01234));
213 TEST("_1234_", "_%lu_", ((long)1234));
214 TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc));
215 TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC));
216
217 TEST("_-1234_", "_%lld_", ((long long)-1234));
218 TEST("_1234_", "_%lld_", ((long long)1234));
219 TEST("_-1234_", "_%lli_", ((long long)-1234));
220 TEST("_1234_", "_%lli_", ((long long)1234));
221 TEST("_01234_", "_%#llo_", ((long long)01234));
222 TEST("_1234_", "_%llu_", ((long long)1234));
223 TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc));
224 TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC));
225
226 TEST("_-1234_", "_%qd_", ((long long)-1234));
227 TEST("_1234_", "_%qd_", ((long long)1234));
228 TEST("_-1234_", "_%qi_", ((long long)-1234));
229 TEST("_1234_", "_%qi_", ((long long)1234));
230 TEST("_01234_", "_%#qo_", ((long long)01234));
231 TEST("_1234_", "_%qu_", ((long long)1234));
232 TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc));
233 TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC));
234
235 TEST("_-1234_", "_%jd_", ((intmax_t)-1234));
236 TEST("_1234_", "_%jd_", ((intmax_t)1234));
237 TEST("_-1234_", "_%ji_", ((intmax_t)-1234));
238 TEST("_1234_", "_%ji_", ((intmax_t)1234));
239 TEST("_01234_", "_%#jo_", ((intmax_t)01234));
240 TEST("_1234_", "_%ju_", ((intmax_t)1234));
241 TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc));
242 TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC));
243
244 TEST("_1234_", "_%td_", ((ptrdiff_t)1234));
245 TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234));
246 TEST("_1234_", "_%ti_", ((ptrdiff_t)1234));
247 TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234));
248
249 TEST("_-1234_", "_%zd_", ((ssize_t)-1234));
250 TEST("_1234_", "_%zd_", ((ssize_t)1234));
251 TEST("_-1234_", "_%zi_", ((ssize_t)-1234));
252 TEST("_1234_", "_%zi_", ((ssize_t)1234));
253 TEST("_01234_", "_%#zo_", ((ssize_t)01234));
254 TEST("_1234_", "_%zu_", ((ssize_t)1234));
255 TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc));
256 TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC));
257#undef BUFLEN
258}
259TEST_END
260
261int
262main(void) {
263 return test(
264 test_malloc_strtoumax_no_endptr,
265 test_malloc_strtoumax,
266 test_malloc_snprintf_truncated,
267 test_malloc_snprintf);
268}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/math.c b/examples/redis-unstable/deps/jemalloc/test/unit/math.c
deleted file mode 100644
index a32767c..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/math.c
+++ /dev/null
@@ -1,390 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#define MAX_REL_ERR 1.0e-9
4#define MAX_ABS_ERR 1.0e-9
5
6#include <float.h>
7
8#ifdef __PGI
9#undef INFINITY
10#endif
11
12#ifndef INFINITY
13#define INFINITY (DBL_MAX + DBL_MAX)
14#endif
15
static bool
double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) {
	/*
	 * Approximate floating-point equality: accept when the absolute
	 * difference is tiny (covers values near zero, where a relative
	 * test degenerates), otherwise compare the relative error scaled
	 * by the larger-magnitude operand.
	 */
	double diff = fabs(a - b);

	if (diff < max_abs_err) {
		return true;
	}
	double denom = (fabs(b) > fabs(a)) ? b : a;
	return fabs(diff / denom) < max_rel_err;
}
26
/* Iterative x! in uint64_t; factorial(0) == factorial(1) == 1. */
static uint64_t
factorial(unsigned x) {
	uint64_t acc = 1;
	unsigned k;

	for (k = x; k > 1; k--) {
		acc *= (uint64_t)k;
	}

	return acc;
}
38
TEST_BEGIN(test_ln_gamma_factorial) {
	unsigned x;

	/*
	 * exp(ln_gamma(x)) == (x-1)! for integer x.  The bound x <= 21
	 * keeps (x-1)! = 20! within uint64_t range.
	 */
	for (x = 1; x <= 21; x++) {
		expect_true(double_eq_rel(exp(ln_gamma(x)),
		    (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR),
		    "Incorrect factorial result for x=%u", x);
	}
}
TEST_END
50
51/* Expected ln_gamma([0.0..100.0] increment=0.25). */
52static const double ln_gamma_misc_expected[] = {
53 INFINITY,
54 1.28802252469807743, 0.57236494292470008, 0.20328095143129538,
55 0.00000000000000000, -0.09827183642181320, -0.12078223763524518,
56 -0.08440112102048555, 0.00000000000000000, 0.12487171489239651,
57 0.28468287047291918, 0.47521466691493719, 0.69314718055994529,
58 0.93580193110872523, 1.20097360234707429, 1.48681557859341718,
59 1.79175946922805496, 2.11445692745037128, 2.45373657084244234,
60 2.80857141857573644, 3.17805383034794575, 3.56137591038669710,
61 3.95781396761871651, 4.36671603662228680, 4.78749174278204581,
62 5.21960398699022932, 5.66256205985714178, 6.11591589143154568,
63 6.57925121201010121, 7.05218545073853953, 7.53436423675873268,
64 8.02545839631598312, 8.52516136106541467, 9.03318691960512332,
65 9.54926725730099690, 10.07315123968123949, 10.60460290274525086,
66 11.14340011995171231, 11.68933342079726856, 12.24220494005076176,
67 12.80182748008146909, 13.36802367147604720, 13.94062521940376342,
68 14.51947222506051816, 15.10441257307551943, 15.69530137706046524,
69 16.29200047656724237, 16.89437797963419285, 17.50230784587389010,
70 18.11566950571089407, 18.73434751193644843, 19.35823122022435427,
71 19.98721449566188468, 20.62119544270163018, 21.26007615624470048,
72 21.90376249182879320, 22.55216385312342098, 23.20519299513386002,
73 23.86276584168908954, 24.52480131594137802, 25.19122118273868338,
74 25.86194990184851861, 26.53691449111561340, 27.21604439872720604,
75 27.89927138384089389, 28.58652940490193828, 29.27775451504081516,
76 29.97288476399884871, 30.67186010608067548, 31.37462231367769050,
77 32.08111489594735843, 32.79128302226991565, 33.50507345013689076,
78 34.22243445715505317, 34.94331577687681545, 35.66766853819134298,
79 36.39544520803305261, 37.12659953718355865, 37.86108650896109395,
80 38.59886229060776230, 39.33988418719949465, 40.08411059791735198,
81 40.83150097453079752, 41.58201578195490100, 42.33561646075348506,
82 43.09226539146988699, 43.85192586067515208, 44.61456202863158893,
83 45.38013889847690052, 46.14862228684032885, 46.91997879580877395,
84 47.69417578616628361, 48.47118135183522014, 49.25096429545256882,
85 50.03349410501914463, 50.81874093156324790, 51.60667556776436982,
86 52.39726942748592364, 53.19049452616926743, 53.98632346204390586,
87 54.78472939811231157, 55.58568604486942633, 56.38916764371992940,
88 57.19514895105859864, 58.00360522298051080, 58.81451220059079787,
89 59.62784609588432261, 60.44358357816834371, 61.26170176100199427,
90 62.08217818962842927, 62.90499082887649962, 63.73011805151035958,
91 64.55753862700632340, 65.38723171073768015, 66.21917683354901385,
92 67.05335389170279825, 67.88974313718154008, 68.72832516833013017,
93 69.56908092082363737, 70.41199165894616385, 71.25703896716800045,
94 72.10420474200799390, 72.95347118416940191, 73.80482079093779646,
95 74.65823634883015814, 75.51370092648485866, 76.37119786778275454,
96 77.23071078519033961, 78.09222355331530707, 78.95572030266725960,
97 79.82118541361435859, 80.68860351052903468, 81.55795945611502873,
98 82.42923834590904164, 83.30242550295004378, 84.17750647261028973,
99 85.05446701758152983, 85.93329311301090456, 86.81397094178107920,
100 87.69648688992882057, 88.58082754219766741, 89.46697967771913795,
101 90.35493026581838194, 91.24466646193963015, 92.13617560368709292,
102 93.02944520697742803, 93.92446296229978486, 94.82121673107967297,
103 95.71969454214321615, 96.61988458827809723, 97.52177522288820910,
104 98.42535495673848800, 99.33061245478741341, 100.23753653310367895,
105 101.14611615586458981, 102.05634043243354370, 102.96819861451382394,
106 103.88168009337621811, 104.79677439715833032, 105.71347118823287303,
107 106.63176026064346047, 107.55163153760463501, 108.47307506906540198,
108 109.39608102933323153, 110.32063971475740516, 111.24674154146920557,
109 112.17437704317786995, 113.10353686902013237, 114.03421178146170689,
110 114.96639265424990128, 115.90007047041454769, 116.83523632031698014,
111 117.77188139974506953, 118.70999700805310795, 119.64957454634490830,
112 120.59060551569974962, 121.53308151543865279, 122.47699424143097247,
113 123.42233548443955726, 124.36909712850338394, 125.31727114935689826,
114 126.26684961288492559, 127.21782467361175861, 128.17018857322420899,
115 129.12393363912724453, 130.07905228303084755, 131.03553699956862033,
116 131.99338036494577864, 132.95257503561629164, 133.91311374698926784,
117 134.87498931216194364, 135.83819462068046846, 136.80272263732638294,
118 137.76856640092901785, 138.73571902320256299, 139.70417368760718091,
119 140.67392364823425055, 141.64496222871400732, 142.61728282114600574,
120 143.59087888505104047, 144.56574394634486680, 145.54187159633210058,
121 146.51925549072063859, 147.49788934865566148, 148.47776695177302031,
122 149.45888214327129617, 150.44122882700193600, 151.42480096657754984,
123 152.40959258449737490, 153.39559776128982094, 154.38281063467164245,
124 155.37122539872302696, 156.36083630307879844, 157.35163765213474107,
125 158.34362380426921391, 159.33678917107920370, 160.33112821663092973,
126 161.32663545672428995, 162.32330545817117695, 163.32113283808695314,
127 164.32011226319519892, 165.32023844914485267, 166.32150615984036790,
128 167.32391020678358018, 168.32744544842768164, 169.33210678954270634,
129 170.33788918059275375, 171.34478761712384198, 172.35279713916281707,
130 173.36191283062726143, 174.37212981874515094, 175.38344327348534080,
131 176.39584840699734514, 177.40934047306160437, 178.42391476654847793,
132 179.43956662288721304, 180.45629141754378111, 181.47408456550741107,
133 182.49294152078630304, 183.51285777591152737, 184.53382886144947861,
134 185.55585034552262869, 186.57891783333786861, 187.60302696672312095,
135 188.62817342367162610, 189.65435291789341932, 190.68156119837468054,
136 191.70979404894376330, 192.73904728784492590, 193.76931676731820176,
137 194.80059837318714244, 195.83288802445184729, 196.86618167288995096,
138 197.90047530266301123, 198.93576492992946214, 199.97204660246373464,
139 201.00931639928148797, 202.04757043027063901, 203.08680483582807597,
140 204.12701578650228385, 205.16819948264117102, 206.21035215404597807,
141 207.25347005962987623, 208.29754948708190909, 209.34258675253678916,
142 210.38857820024875878, 211.43552020227099320, 212.48340915813977858,
143 213.53224149456323744, 214.58201366511514152, 215.63272214993284592,
144 216.68436345542014010, 217.73693411395422004, 218.79043068359703739,
145 219.84484974781133815, 220.90018791517996988, 221.95644181913033322,
146 223.01360811766215875, 224.07168349307951871, 225.13066465172661879,
147 226.19054832372759734, 227.25133126272962159, 228.31301024565024704,
148 229.37558207242807384, 230.43904356577689896, 231.50339157094342113,
149 232.56862295546847008, 233.63473460895144740, 234.70172344281823484,
150 235.76958639009222907, 236.83832040516844586, 237.90792246359117712,
151 238.97838956183431947, 240.04971871708477238, 241.12190696702904802,
152 242.19495136964280846, 243.26884900298270509, 244.34359696498191283,
153 245.41919237324782443, 246.49563236486270057, 247.57291409618682110,
154 248.65103474266476269, 249.72999149863338175, 250.80978157713354904,
155 251.89040220972316320, 252.97185064629374551, 254.05412415488834199,
156 255.13722002152300661, 256.22113555000953511, 257.30586806178126835,
157 258.39141489572085675, 259.47777340799029844, 260.56494097186322279,
158 261.65291497755913497, 262.74169283208021852, 263.83127195904967266,
159 264.92164979855277807, 266.01282380697938379, 267.10479145686849733,
160 268.19755023675537586, 269.29109765101975427, 270.38543121973674488,
161 271.48054847852881721, 272.57644697842033565, 273.67312428569374561,
162 274.77057798174683967, 275.86880566295326389, 276.96780494052313770,
163 278.06757344036617496, 279.16810880295668085, 280.26940868320008349,
164 281.37147075030043197, 282.47429268763045229, 283.57787219260217171,
165 284.68220697654078322, 285.78729476455760050, 286.89313329542699194,
166 287.99972032146268930, 289.10705360839756395, 290.21513093526289140,
167 291.32395009427028754, 292.43350889069523646, 293.54380514276073200,
168 294.65483668152336350, 295.76660135076059532, 296.87909700685889902,
169 297.99232151870342022, 299.10627276756946458, 300.22094864701409733,
170 301.33634706277030091, 302.45246593264130297, 303.56930318639643929,
171 304.68685676566872189, 305.80512462385280514, 306.92410472600477078,
172 308.04379504874236773, 309.16419358014690033, 310.28529831966631036,
173 311.40710727801865687, 312.52961847709792664, 313.65282994987899201,
174 314.77673974032603610, 315.90134590329950015, 317.02664650446632777,
175 318.15263962020929966, 319.27932333753892635, 320.40669575400545455,
176 321.53475497761127144, 322.66349912672620803, 323.79292633000159185,
177 324.92303472628691452, 326.05382246454587403, 327.18528770377525916,
178 328.31742861292224234, 329.45024337080525356, 330.58373016603343331,
179 331.71788719692847280, 332.85271267144611329, 333.98820480709991898,
180 335.12436183088397001, 336.26118197919845443, 337.39866349777429377,
181 338.53680464159958774, 339.67560367484657036, 340.81505887079896411,
182 341.95516851178109619, 343.09593088908627578, 344.23734430290727460,
183 345.37940706226686416, 346.52211748494903532, 347.66547389743118401,
184 348.80947463481720661, 349.95411804077025408, 351.09940246744753267,
185 352.24532627543504759, 353.39188783368263103, 354.53908551944078908,
186 355.68691771819692349, 356.83538282361303118, 357.98447923746385868,
187 359.13420536957539753
188};
189
TEST_BEGIN(test_ln_gamma_misc) {
	unsigned i;

	/*
	 * Compare ln_gamma(i * 0.25) against the precomputed table.
	 * i starts at 1 to skip index 0, whose expected value is
	 * INFINITY (ln_gamma diverges at 0).
	 */
	for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) {
		double x = (double)i * 0.25;
		expect_true(double_eq_rel(ln_gamma(x),
		    ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR),
		    "Incorrect ln_gamma result for i=%u", i);
	}
}
TEST_END
201
202/* Expected pt_norm([0.01..0.99] increment=0.01). */
203static const double pt_norm_expected[] = {
204 -INFINITY,
205 -2.32634787404084076, -2.05374891063182252, -1.88079360815125085,
206 -1.75068607125216946, -1.64485362695147264, -1.55477359459685305,
207 -1.47579102817917063, -1.40507156030963221, -1.34075503369021654,
208 -1.28155156554460081, -1.22652812003661049, -1.17498679206608991,
209 -1.12639112903880045, -1.08031934081495606, -1.03643338949378938,
210 -0.99445788320975281, -0.95416525314619416, -0.91536508784281390,
211 -0.87789629505122846, -0.84162123357291418, -0.80642124701824025,
212 -0.77219321418868492, -0.73884684918521371, -0.70630256284008752,
213 -0.67448975019608171, -0.64334540539291685, -0.61281299101662701,
214 -0.58284150727121620, -0.55338471955567281, -0.52440051270804067,
215 -0.49585034734745320, -0.46769879911450812, -0.43991316567323380,
216 -0.41246312944140462, -0.38532046640756751, -0.35845879325119373,
217 -0.33185334643681652, -0.30548078809939738, -0.27931903444745404,
218 -0.25334710313579978, -0.22754497664114931, -0.20189347914185077,
219 -0.17637416478086135, -0.15096921549677725, -0.12566134685507399,
220 -0.10043372051146975, -0.07526986209982976, -0.05015358346473352,
221 -0.02506890825871106, 0.00000000000000000, 0.02506890825871106,
222 0.05015358346473366, 0.07526986209982990, 0.10043372051146990,
223 0.12566134685507413, 0.15096921549677739, 0.17637416478086146,
224 0.20189347914185105, 0.22754497664114931, 0.25334710313579978,
225 0.27931903444745404, 0.30548078809939738, 0.33185334643681652,
226 0.35845879325119373, 0.38532046640756762, 0.41246312944140484,
227 0.43991316567323391, 0.46769879911450835, 0.49585034734745348,
228 0.52440051270804111, 0.55338471955567303, 0.58284150727121620,
229 0.61281299101662701, 0.64334540539291685, 0.67448975019608171,
230 0.70630256284008752, 0.73884684918521371, 0.77219321418868492,
231 0.80642124701824036, 0.84162123357291441, 0.87789629505122879,
232 0.91536508784281423, 0.95416525314619460, 0.99445788320975348,
233 1.03643338949378938, 1.08031934081495606, 1.12639112903880045,
234 1.17498679206608991, 1.22652812003661049, 1.28155156554460081,
235 1.34075503369021654, 1.40507156030963265, 1.47579102817917085,
236 1.55477359459685394, 1.64485362695147308, 1.75068607125217102,
237 1.88079360815125041, 2.05374891063182208, 2.32634787404084076
238};
239
TEST_BEGIN(test_pt_norm) {
	unsigned i;

	/*
	 * Compare pt_norm(i * 0.01) against the precomputed table.
	 * i starts at 1 to skip index 0, whose expected value is
	 * -INFINITY (the normal quantile at p=0).
	 */
	for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) {
		double p = (double)i * 0.01;
		expect_true(double_eq_rel(pt_norm(p), pt_norm_expected[i],
		    MAX_REL_ERR, MAX_ABS_ERR),
		    "Incorrect pt_norm result for i=%u", i);
	}
}
TEST_END
251
252/*
253 * Expected pt_chi2(p=[0.01..0.99] increment=0.07,
254 * df={0.1, 1.1, 10.1, 100.1, 1000.1}).
255 */
256static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1};
257static const double pt_chi2_expected[] = {
258 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17,
259 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09,
260 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05,
261 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03,
262 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00,
263
264 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113,
265 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931,
266 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259,
267 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304,
268 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839,
269
270 2.606673548632508, 4.602913725294877, 5.646152813924212,
271 6.488971315540869, 7.249823275816285, 7.977314231410841,
272 8.700354939944047, 9.441728024225892, 10.224338321374127,
273 11.076435368801061, 12.039320937038386, 13.183878752697167,
274 14.657791935084575, 16.885728216339373, 23.361991680031817,
275
276 70.14844087392152, 80.92379498849355, 85.53325420085891,
277 88.94433120715347, 91.83732712857017, 94.46719943606301,
278 96.96896479994635, 99.43412843510363, 101.94074719829733,
279 104.57228644307247, 107.43900093448734, 110.71844673417287,
280 114.76616819871325, 120.57422505959563, 135.92318818757556,
281
282 899.0072447849649, 937.9271278858220, 953.8117189560207,
283 965.3079371501154, 974.8974061207954, 983.4936235182347,
284 991.5691170518946, 999.4334123954690, 1007.3391826856553,
285 1015.5445154999951, 1024.3777075619569, 1034.3538789836223,
286 1046.4872561869577, 1063.5717461999654, 1107.0741966053859
287};
288
TEST_BEGIN(test_pt_chi2) {
	unsigned i, j;
	/* Index into the flattened pt_chi2_expected table. */
	unsigned e = 0;

	for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) {
		double df = pt_chi2_df[i];
		/* ln_gamma(df/2) is loop-invariant per df; hoist it. */
		double ln_gamma_df = ln_gamma(df * 0.5);
		/* p = 0.01, 0.08, ..., 0.99: 15 probabilities per df. */
		for (j = 1; j < 100; j += 7) {
			double p = (double)j * 0.01;
			expect_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df),
			    pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR),
			    "Incorrect pt_chi2 result for i=%u, j=%u", i, j);
			e++;
		}
	}
}
TEST_END
306
307/*
308 * Expected pt_gamma(p=[0.1..0.99] increment=0.07,
309 * shape=[0.5..3.0] increment=0.5).
310 */
311static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0};
312static const double pt_gamma_expected[] = {
313 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02,
314 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01,
315 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01,
316 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01,
317 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00,
318
319 0.01005033585350144, 0.08338160893905107, 0.16251892949777497,
320 0.24846135929849966, 0.34249030894677596, 0.44628710262841947,
321 0.56211891815354142, 0.69314718055994529, 0.84397007029452920,
322 1.02165124753198167, 1.23787435600161766, 1.51412773262977574,
323 1.89711998488588196, 2.52572864430825783, 4.60517018598809091,
324
325 0.05741590094955853, 0.24747378084860744, 0.39888572212236084,
326 0.54394139997444901, 0.69048812513915159, 0.84311389861296104,
327 1.00580622221479898, 1.18298694218766931, 1.38038096305861213,
328 1.60627736383027453, 1.87396970522337947, 2.20749220408081070,
329 2.65852391865854942, 3.37934630984842244, 5.67243336507218476,
330
331 0.1485547402532659, 0.4657458011640391, 0.6832386130709406,
332 0.8794297834672100, 1.0700752852474524, 1.2629614217350744,
333 1.4638400448580779, 1.6783469900166610, 1.9132338090606940,
334 2.1778589228618777, 2.4868823970010991, 2.8664695666264195,
335 3.3724415436062114, 4.1682658512758071, 6.6383520679938108,
336
337 0.2771490383641385, 0.7195001279643727, 0.9969081732265243,
338 1.2383497880608061, 1.4675206597269927, 1.6953064251816552,
339 1.9291243435606809, 2.1757300955477641, 2.4428032131216391,
340 2.7406534569230616, 3.0851445039665513, 3.5043101122033367,
341 4.0575997065264637, 4.9182956424675286, 7.5431362346944937,
342
343 0.4360451650782932, 0.9983600902486267, 1.3306365880734528,
344 1.6129750834753802, 1.8767241606994294, 2.1357032436097660,
345 2.3988853336865565, 2.6740603137235603, 2.9697561737517959,
346 3.2971457713883265, 3.6731795898504660, 4.1275751617770631,
347 4.7230515633946677, 5.6417477865306020, 8.4059469148854635
348};
349
TEST_BEGIN(test_pt_gamma_shape) {
	unsigned i, j;
	/* Index into the flattened pt_gamma_expected table. */
	unsigned e = 0;

	for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) {
		double shape = pt_gamma_shape[i];
		/* ln_gamma(shape) is loop-invariant per shape; hoist it. */
		double ln_gamma_shape = ln_gamma(shape);
		/* p = 0.01, 0.08, ..., 0.99: 15 probabilities per shape. */
		for (j = 1; j < 100; j += 7) {
			double p = (double)j * 0.01;
			expect_true(double_eq_rel(pt_gamma(p, shape, 1.0,
			    ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR,
			    MAX_ABS_ERR),
			    "Incorrect pt_gamma result for i=%u, j=%u", i, j);
			e++;
		}
	}
}
TEST_END
368
TEST_BEGIN(test_pt_gamma_scale) {
	double shape = 1.0;
	double ln_gamma_shape = ln_gamma(shape);

	/*
	 * The scale parameter is a pure multiplier of the quantile:
	 * pt_gamma(p, k, 10) == 10 * pt_gamma(p, k, 1).
	 */
	expect_true(double_eq_rel(
	    pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0,
	    pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR,
	    MAX_ABS_ERR),
	    "Scale should be trivially equivalent to external multiplication");
}
TEST_END
380
int
main(void) {
	/* Run the math helper tests in sequence. */
	return test(
	    test_ln_gamma_factorial,
	    test_ln_gamma_misc,
	    test_pt_norm,
	    test_pt_chi2,
	    test_pt_gamma_shape,
	    test_pt_gamma_scale);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/mpsc_queue.c b/examples/redis-unstable/deps/jemalloc/test/unit/mpsc_queue.c
deleted file mode 100644
index 895edf8..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/mpsc_queue.c
+++ /dev/null
@@ -1,304 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/mpsc_queue.h"
4
5typedef struct elem_s elem_t;
6typedef ql_head(elem_t) elem_list_t;
7typedef mpsc_queue(elem_t) elem_mpsc_queue_t;
8struct elem_s {
9 int thread;
10 int idx;
11 ql_elm(elem_t) link;
12};
13
14/* Include both proto and gen to make sure they match up. */
15mpsc_queue_proto(static, elem_mpsc_queue_, elem_mpsc_queue_t, elem_t,
16 elem_list_t);
17mpsc_queue_gen(static, elem_mpsc_queue_, elem_mpsc_queue_t, elem_t,
18 elem_list_t, link);
19
/*
 * Tag each of nelems elements with its owner thread id and its position,
 * and initialize its intrusive list linkage.
 */
static void
init_elems_simple(elem_t *elems, int nelems, int thread) {
	for (int i = 0; i < nelems; i++) {
		elems[i].thread = thread;
		elems[i].idx = i;
		ql_elm_new(&elems[i], link);
	}
}
28
/*
 * Verify that list contains at most nelems elements, all owned by the
 * given thread, in strictly increasing idx order starting at 0.
 */
static void
check_elems_simple(elem_list_t *list, int nelems, int thread) {
	elem_t *elem;
	int next_idx = 0;
	ql_foreach(elem, list, link) {
		expect_d_lt(next_idx, nelems, "Too many list items");
		expect_d_eq(thread, elem->thread, "");
		expect_d_eq(next_idx, elem->idx, "List out of order");
		next_idx++;
	}
}
40
/*
 * Exercise all four empty/nonempty combinations of the queue and the
 * destination list for pop_batch.
 */
TEST_BEGIN(test_simple) {
	enum {NELEMS = 10};
	elem_t elems[NELEMS];
	elem_list_t list;
	elem_mpsc_queue_t queue;

	/* Pop empty queue onto empty list -> empty list */
	ql_new(&list);
	elem_mpsc_queue_new(&queue);
	elem_mpsc_queue_pop_batch(&queue, &list);
	expect_true(ql_empty(&list), "");

	/* Pop empty queue onto nonempty list -> list unchanged */
	ql_new(&list);
	elem_mpsc_queue_new(&queue);
	init_elems_simple(elems, NELEMS, 0);
	for (int i = 0; i < NELEMS; i++) {
		ql_tail_insert(&list, &elems[i], link);
	}
	elem_mpsc_queue_pop_batch(&queue, &list);
	check_elems_simple(&list, NELEMS, 0);

	/* Pop nonempty queue onto empty list -> list takes queue contents */
	ql_new(&list);
	elem_mpsc_queue_new(&queue);
	init_elems_simple(elems, NELEMS, 0);
	for (int i = 0; i < NELEMS; i++) {
		elem_mpsc_queue_push(&queue, &elems[i]);
	}
	elem_mpsc_queue_pop_batch(&queue, &list);
	check_elems_simple(&list, NELEMS, 0);

	/* Pop nonempty queue onto nonempty list -> list gains queue contents */
	ql_new(&list);
	elem_mpsc_queue_new(&queue);
	init_elems_simple(elems, NELEMS, 0);
	for (int i = 0; i < NELEMS / 2; i++) {
		ql_tail_insert(&list, &elems[i], link);
	}
	for (int i = NELEMS / 2; i < NELEMS; i++) {
		elem_mpsc_queue_push(&queue, &elems[i]);
	}
	elem_mpsc_queue_pop_batch(&queue, &list);
	check_elems_simple(&list, NELEMS, 0);

}
TEST_END
88
89TEST_BEGIN(test_push_single_or_batch) {
90 enum {
91 BATCH_MAX = 10,
92 /*
93 * We'll push i items one-at-a-time, then i items as a batch,
94 * then i items as a batch again, as i ranges from 1 to
95 * BATCH_MAX. So we need 3 times the sum of the numbers from 1
96 * to BATCH_MAX elements total.
97 */
98 NELEMS = 3 * BATCH_MAX * (BATCH_MAX - 1) / 2
99 };
100 elem_t elems[NELEMS];
101 init_elems_simple(elems, NELEMS, 0);
102 elem_list_t list;
103 ql_new(&list);
104 elem_mpsc_queue_t queue;
105 elem_mpsc_queue_new(&queue);
106 int next_idx = 0;
107 for (int i = 1; i < 10; i++) {
108 /* Push i items 1 at a time. */
109 for (int j = 0; j < i; j++) {
110 elem_mpsc_queue_push(&queue, &elems[next_idx]);
111 next_idx++;
112 }
113 /* Push i items in batch. */
114 for (int j = 0; j < i; j++) {
115 ql_tail_insert(&list, &elems[next_idx], link);
116 next_idx++;
117 }
118 elem_mpsc_queue_push_batch(&queue, &list);
119 expect_true(ql_empty(&list), "Batch push should empty source");
120 /*
121 * Push i items in batch, again. This tests two batches
122 * proceeding one after the other.
123 */
124 for (int j = 0; j < i; j++) {
125 ql_tail_insert(&list, &elems[next_idx], link);
126 next_idx++;
127 }
128 elem_mpsc_queue_push_batch(&queue, &list);
129 expect_true(ql_empty(&list), "Batch push should empty source");
130 }
131 expect_d_eq(NELEMS, next_idx, "Miscomputed number of elems to push.");
132
133 expect_true(ql_empty(&list), "");
134 elem_mpsc_queue_pop_batch(&queue, &list);
135 check_elems_simple(&list, NELEMS, 0);
136}
137TEST_END
138
/*
 * Mix single and batch pushes with two interleaved batch pops, and check
 * that the overall FIFO order is preserved across the pop boundary.
 */
TEST_BEGIN(test_multi_op) {
	enum {NELEMS = 20};
	elem_t elems[NELEMS];
	init_elems_simple(elems, NELEMS, 0);
	elem_list_t push_list;
	ql_new(&push_list);
	elem_list_t result_list;
	ql_new(&result_list);
	elem_mpsc_queue_t queue;
	elem_mpsc_queue_new(&queue);

	int next_idx = 0;
	/* Push first quarter 1-at-a-time. */
	for (int i = 0; i < NELEMS / 4; i++) {
		elem_mpsc_queue_push(&queue, &elems[next_idx]);
		next_idx++;
	}
	/* Push second quarter in batch. */
	for (int i = NELEMS / 4; i < NELEMS / 2; i++) {
		ql_tail_insert(&push_list, &elems[next_idx], link);
		next_idx++;
	}
	elem_mpsc_queue_push_batch(&queue, &push_list);
	/* Batch pop all pushed elements. */
	elem_mpsc_queue_pop_batch(&queue, &result_list);
	/* Push third quarter in batch. */
	for (int i = NELEMS / 2; i < 3 * NELEMS / 4; i++) {
		ql_tail_insert(&push_list, &elems[next_idx], link);
		next_idx++;
	}
	elem_mpsc_queue_push_batch(&queue, &push_list);
	/* Push last quarter one-at-a-time. */
	for (int i = 3 * NELEMS / 4; i < NELEMS; i++) {
		elem_mpsc_queue_push(&queue, &elems[next_idx]);
		next_idx++;
	}
	/* Pop them again. Order of existing list should be preserved. */
	elem_mpsc_queue_pop_batch(&queue, &result_list);

	check_elems_simple(&result_list, NELEMS, 0);

}
TEST_END
182
/* Per-pusher-thread arguments: which slice of elems this thread pushes. */
typedef struct pusher_arg_s pusher_arg_t;
struct pusher_arg_s {
	elem_mpsc_queue_t *queue;
	int thread;		/* Owner id stamped on pushed elements. */
	elem_t *elems;		/* This thread's slice of the shared array. */
	int nelems;
};

/* Single-popper arguments: progress counters, one per pusher thread. */
typedef struct popper_arg_s popper_arg_t;
struct popper_arg_s {
	elem_mpsc_queue_t *queue;
	int npushers;
	int nelems_per_pusher;
	int *pusher_counts;	/* Next expected idx for each pusher. */
};
198
/*
 * Pusher thread body: alternate batches of 10 batch-pushed items with
 * runs of 10 single-pushed items until this thread's slice is exhausted.
 */
static void *
thd_pusher(void *void_arg) {
	pusher_arg_t *arg = (pusher_arg_t *)void_arg;
	int next_idx = 0;
	while (next_idx < arg->nelems) {
		/* Push 10 items in batch. */
		elem_list_t list;
		ql_new(&list);
		int limit = next_idx + 10;
		while (next_idx < arg->nelems && next_idx < limit) {
			ql_tail_insert(&list, &arg->elems[next_idx], link);
			next_idx++;
		}
		elem_mpsc_queue_push_batch(arg->queue, &list);
		/* Push 10 items one-at-a-time. */
		limit = next_idx + 10;
		while (next_idx < arg->nelems && next_idx < limit) {
			elem_mpsc_queue_push(arg->queue, &arg->elems[next_idx]);
			next_idx++;
		}

	}
	return NULL;
}
223
/*
 * Popper thread body: batch-pop until every pusher's elements have been
 * seen, verifying that each pusher's elements arrive in push order (the
 * MPSC queue guarantees per-producer FIFO, not global order).
 */
static void *
thd_popper(void *void_arg) {
	popper_arg_t *arg = (popper_arg_t *)void_arg;
	int done_pushers = 0;
	while (done_pushers < arg->npushers) {
		elem_list_t list;
		ql_new(&list);
		elem_mpsc_queue_pop_batch(arg->queue, &list);
		elem_t *elem;
		ql_foreach(elem, &list, link) {
			int thread = elem->thread;
			int idx = elem->idx;
			expect_d_eq(arg->pusher_counts[thread], idx,
			    "Thread's pushes reordered");
			arg->pusher_counts[thread]++;
			if (arg->pusher_counts[thread]
			    == arg->nelems_per_pusher) {
				done_pushers++;
			}
		}
	}
	return NULL;
}
247
248TEST_BEGIN(test_multiple_threads) {
249 enum {
250 NPUSHERS = 4,
251 NELEMS_PER_PUSHER = 1000*1000,
252 };
253 thd_t pushers[NPUSHERS];
254 pusher_arg_t pusher_arg[NPUSHERS];
255
256 thd_t popper;
257 popper_arg_t popper_arg;
258
259 elem_mpsc_queue_t queue;
260 elem_mpsc_queue_new(&queue);
261
262 elem_t *elems = calloc(NPUSHERS * NELEMS_PER_PUSHER, sizeof(elem_t));
263 elem_t *elem_iter = elems;
264 for (int i = 0; i < NPUSHERS; i++) {
265 pusher_arg[i].queue = &queue;
266 pusher_arg[i].thread = i;
267 pusher_arg[i].elems = elem_iter;
268 pusher_arg[i].nelems = NELEMS_PER_PUSHER;
269
270 init_elems_simple(elem_iter, NELEMS_PER_PUSHER, i);
271 elem_iter += NELEMS_PER_PUSHER;
272 }
273 popper_arg.queue = &queue;
274 popper_arg.npushers = NPUSHERS;
275 popper_arg.nelems_per_pusher = NELEMS_PER_PUSHER;
276 int pusher_counts[NPUSHERS] = {0};
277 popper_arg.pusher_counts = pusher_counts;
278
279 thd_create(&popper, thd_popper, (void *)&popper_arg);
280 for (int i = 0; i < NPUSHERS; i++) {
281 thd_create(&pushers[i], thd_pusher, &pusher_arg[i]);
282 }
283
284 thd_join(popper, NULL);
285 for (int i = 0; i < NPUSHERS; i++) {
286 thd_join(pushers[i], NULL);
287 }
288
289 for (int i = 0; i < NPUSHERS; i++) {
290 expect_d_eq(NELEMS_PER_PUSHER, pusher_counts[i], "");
291 }
292
293 free(elems);
294}
295TEST_END
296
int
main(void) {
	/*
	 * test_no_reentrancy: these tests use raw malloc/free and threads,
	 * so skip the allocator-reentrancy variants of the test harness.
	 */
	return test_no_reentrancy(
	    test_simple,
	    test_push_single_or_batch,
	    test_multi_op,
	    test_multiple_threads);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/mq.c b/examples/redis-unstable/deps/jemalloc/test/unit/mq.c
deleted file mode 100644
index f833f77..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/mq.c
+++ /dev/null
@@ -1,89 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#define NSENDERS 3
4#define NMSGS 100000
5
/* Message type carrying only its intrusive queue linkage. */
typedef struct mq_msg_s mq_msg_t;
struct mq_msg_s {
	mq_msg(mq_msg_t) link;
};
/* Instantiate the message-queue implementation for mq_msg_t. */
mq_gen(static, mq_, mq_t, mq_msg_t, link)
11
/* Single-threaded smoke test: init, count, tryget/get on empty and
 * one-element queues, fini. */
TEST_BEGIN(test_mq_basic) {
	mq_t mq;
	mq_msg_t msg;

	expect_false(mq_init(&mq), "Unexpected mq_init() failure");
	expect_u_eq(mq_count(&mq), 0, "mq should be empty");
	expect_ptr_null(mq_tryget(&mq),
	    "mq_tryget() should fail when the queue is empty");

	mq_put(&mq, &msg);
	expect_u_eq(mq_count(&mq), 1, "mq should contain one message");
	expect_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg");

	mq_put(&mq, &msg);
	/* mq_get() blocks until a message is available. */
	expect_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg");

	mq_fini(&mq);
}
TEST_END
31
/*
 * Receiver thread: blockingly consume every message produced by all
 * senders (NSENDERS * NMSGS total) and free each one.
 */
static void *
thd_receiver_start(void *arg) {
	mq_t *mq = (mq_t *)arg;
	unsigned i;

	for (i = 0; i < (NSENDERS * NMSGS); i++) {
		mq_msg_t *msg = mq_get(mq);
		expect_ptr_not_null(msg, "mq_get() should never return NULL");
		dallocx(msg, 0);
	}
	return NULL;
}
44
/*
 * Sender thread: allocate and enqueue NMSGS messages; ownership passes
 * to the receiver, which frees them.
 */
static void *
thd_sender_start(void *arg) {
	mq_t *mq = (mq_t *)arg;
	unsigned i;

	for (i = 0; i < NMSGS; i++) {
		mq_msg_t *msg;
		void *p;
		p = mallocx(sizeof(mq_msg_t), 0);
		expect_ptr_not_null(p, "Unexpected mallocx() failure");
		msg = (mq_msg_t *)p;
		mq_put(mq, msg);
	}
	return NULL;
}
60
/* Multi-producer/single-consumer stress: NSENDERS senders vs one receiver. */
TEST_BEGIN(test_mq_threaded) {
	mq_t mq;
	thd_t receiver;
	thd_t senders[NSENDERS];
	unsigned i;

	expect_false(mq_init(&mq), "Unexpected mq_init() failure");

	thd_create(&receiver, thd_receiver_start, (void *)&mq);
	for (i = 0; i < NSENDERS; i++) {
		thd_create(&senders[i], thd_sender_start, (void *)&mq);
	}

	/* The receiver returns only after consuming every message. */
	thd_join(receiver, NULL);
	for (i = 0; i < NSENDERS; i++) {
		thd_join(senders[i], NULL);
	}

	mq_fini(&mq);
}
TEST_END
82
int
main(void) {
	/* Run the message-queue tests in sequence. */
	return test(
	    test_mq_basic,
	    test_mq_threaded);
}
89
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/mtx.c b/examples/redis-unstable/deps/jemalloc/test/unit/mtx.c
deleted file mode 100644
index 4aeebc1..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/mtx.c
+++ /dev/null
@@ -1,57 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#define NTHREADS 2
4#define NINCRS 2000000
5
/* Smoke test: a mutex can be initialized, locked, unlocked, destroyed. */
TEST_BEGIN(test_mtx_basic) {
	mtx_t mtx;

	expect_false(mtx_init(&mtx), "Unexpected mtx_init() failure");
	mtx_lock(&mtx);
	mtx_unlock(&mtx);
	mtx_fini(&mtx);
}
TEST_END
15
/* Shared state for the race test: a mutex guarding counter x. */
typedef struct {
	mtx_t mtx;
	unsigned x;
} thd_start_arg_t;
20
/*
 * Worker: increment the shared counter NINCRS times, each increment
 * protected by the shared mutex.
 */
static void *
thd_start(void *varg) {
	thd_start_arg_t *arg = (thd_start_arg_t *)varg;
	unsigned i;

	for (i = 0; i < NINCRS; i++) {
		mtx_lock(&arg->mtx);
		arg->x++;
		mtx_unlock(&arg->mtx);
	}
	return NULL;
}
33
34TEST_BEGIN(test_mtx_race) {
35 thd_start_arg_t arg;
36 thd_t thds[NTHREADS];
37 unsigned i;
38
39 expect_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure");
40 arg.x = 0;
41 for (i = 0; i < NTHREADS; i++) {
42 thd_create(&thds[i], thd_start, (void *)&arg);
43 }
44 for (i = 0; i < NTHREADS; i++) {
45 thd_join(thds[i], NULL);
46 }
47 expect_u_eq(arg.x, NTHREADS * NINCRS,
48 "Race-related counter corruption");
49}
50TEST_END
51
int
main(void) {
	/* Run the mutex tests in sequence. */
	return test(
	    test_mtx_basic,
	    test_mtx_race);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/nstime.c b/examples/redis-unstable/deps/jemalloc/test/unit/nstime.c
deleted file mode 100644
index 56238ab..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/nstime.c
+++ /dev/null
@@ -1,252 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#define BILLION UINT64_C(1000000000)
4
5TEST_BEGIN(test_nstime_init) {
6 nstime_t nst;
7
8 nstime_init(&nst, 42000000043);
9 expect_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read");
10 expect_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
11 expect_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
12}
13TEST_END
14
15TEST_BEGIN(test_nstime_init2) {
16 nstime_t nst;
17
18 nstime_init2(&nst, 42, 43);
19 expect_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
20 expect_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
21}
22TEST_END
23
24TEST_BEGIN(test_nstime_copy) {
25 nstime_t nsta, nstb;
26
27 nstime_init2(&nsta, 42, 43);
28 nstime_init_zero(&nstb);
29 nstime_copy(&nstb, &nsta);
30 expect_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied");
31 expect_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied");
32}
33TEST_END
34
35TEST_BEGIN(test_nstime_compare) {
36 nstime_t nsta, nstb;
37
38 nstime_init2(&nsta, 42, 43);
39 nstime_copy(&nstb, &nsta);
40 expect_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal");
41 expect_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal");
42
43 nstime_init2(&nstb, 42, 42);
44 expect_d_eq(nstime_compare(&nsta, &nstb), 1,
45 "nsta should be greater than nstb");
46 expect_d_eq(nstime_compare(&nstb, &nsta), -1,
47 "nstb should be less than nsta");
48
49 nstime_init2(&nstb, 42, 44);
50 expect_d_eq(nstime_compare(&nsta, &nstb), -1,
51 "nsta should be less than nstb");
52 expect_d_eq(nstime_compare(&nstb, &nsta), 1,
53 "nstb should be greater than nsta");
54
55 nstime_init2(&nstb, 41, BILLION - 1);
56 expect_d_eq(nstime_compare(&nsta, &nstb), 1,
57 "nsta should be greater than nstb");
58 expect_d_eq(nstime_compare(&nstb, &nsta), -1,
59 "nstb should be less than nsta");
60
61 nstime_init2(&nstb, 43, 0);
62 expect_d_eq(nstime_compare(&nsta, &nstb), -1,
63 "nsta should be less than nstb");
64 expect_d_eq(nstime_compare(&nstb, &nsta), 1,
65 "nstb should be greater than nsta");
66}
67TEST_END
68
69TEST_BEGIN(test_nstime_add) {
70 nstime_t nsta, nstb;
71
72 nstime_init2(&nsta, 42, 43);
73 nstime_copy(&nstb, &nsta);
74 nstime_add(&nsta, &nstb);
75 nstime_init2(&nstb, 84, 86);
76 expect_d_eq(nstime_compare(&nsta, &nstb), 0,
77 "Incorrect addition result");
78
79 nstime_init2(&nsta, 42, BILLION - 1);
80 nstime_copy(&nstb, &nsta);
81 nstime_add(&nsta, &nstb);
82 nstime_init2(&nstb, 85, BILLION - 2);
83 expect_d_eq(nstime_compare(&nsta, &nstb), 0,
84 "Incorrect addition result");
85}
86TEST_END
87
88TEST_BEGIN(test_nstime_iadd) {
89 nstime_t nsta, nstb;
90
91 nstime_init2(&nsta, 42, BILLION - 1);
92 nstime_iadd(&nsta, 1);
93 nstime_init2(&nstb, 43, 0);
94 expect_d_eq(nstime_compare(&nsta, &nstb), 0,
95 "Incorrect addition result");
96
97 nstime_init2(&nsta, 42, 1);
98 nstime_iadd(&nsta, BILLION + 1);
99 nstime_init2(&nstb, 43, 2);
100 expect_d_eq(nstime_compare(&nsta, &nstb), 0,
101 "Incorrect addition result");
102}
103TEST_END
104
105TEST_BEGIN(test_nstime_subtract) {
106 nstime_t nsta, nstb;
107
108 nstime_init2(&nsta, 42, 43);
109 nstime_copy(&nstb, &nsta);
110 nstime_subtract(&nsta, &nstb);
111 nstime_init_zero(&nstb);
112 expect_d_eq(nstime_compare(&nsta, &nstb), 0,
113 "Incorrect subtraction result");
114
115 nstime_init2(&nsta, 42, 43);
116 nstime_init2(&nstb, 41, 44);
117 nstime_subtract(&nsta, &nstb);
118 nstime_init2(&nstb, 0, BILLION - 1);
119 expect_d_eq(nstime_compare(&nsta, &nstb), 0,
120 "Incorrect subtraction result");
121}
122TEST_END
123
124TEST_BEGIN(test_nstime_isubtract) {
125 nstime_t nsta, nstb;
126
127 nstime_init2(&nsta, 42, 43);
128 nstime_isubtract(&nsta, 42*BILLION + 43);
129 nstime_init_zero(&nstb);
130 expect_d_eq(nstime_compare(&nsta, &nstb), 0,
131 "Incorrect subtraction result");
132
133 nstime_init2(&nsta, 42, 43);
134 nstime_isubtract(&nsta, 41*BILLION + 44);
135 nstime_init2(&nstb, 0, BILLION - 1);
136 expect_d_eq(nstime_compare(&nsta, &nstb), 0,
137 "Incorrect subtraction result");
138}
139TEST_END
140
141TEST_BEGIN(test_nstime_imultiply) {
142 nstime_t nsta, nstb;
143
144 nstime_init2(&nsta, 42, 43);
145 nstime_imultiply(&nsta, 10);
146 nstime_init2(&nstb, 420, 430);
147 expect_d_eq(nstime_compare(&nsta, &nstb), 0,
148 "Incorrect multiplication result");
149
150 nstime_init2(&nsta, 42, 666666666);
151 nstime_imultiply(&nsta, 3);
152 nstime_init2(&nstb, 127, 999999998);
153 expect_d_eq(nstime_compare(&nsta, &nstb), 0,
154 "Incorrect multiplication result");
155}
156TEST_END
157
158TEST_BEGIN(test_nstime_idivide) {
159 nstime_t nsta, nstb;
160
161 nstime_init2(&nsta, 42, 43);
162 nstime_copy(&nstb, &nsta);
163 nstime_imultiply(&nsta, 10);
164 nstime_idivide(&nsta, 10);
165 expect_d_eq(nstime_compare(&nsta, &nstb), 0,
166 "Incorrect division result");
167
168 nstime_init2(&nsta, 42, 666666666);
169 nstime_copy(&nstb, &nsta);
170 nstime_imultiply(&nsta, 3);
171 nstime_idivide(&nsta, 3);
172 expect_d_eq(nstime_compare(&nsta, &nstb), 0,
173 "Incorrect division result");
174}
175TEST_END
176
177TEST_BEGIN(test_nstime_divide) {
178 nstime_t nsta, nstb, nstc;
179
180 nstime_init2(&nsta, 42, 43);
181 nstime_copy(&nstb, &nsta);
182 nstime_imultiply(&nsta, 10);
183 expect_u64_eq(nstime_divide(&nsta, &nstb), 10,
184 "Incorrect division result");
185
186 nstime_init2(&nsta, 42, 43);
187 nstime_copy(&nstb, &nsta);
188 nstime_imultiply(&nsta, 10);
189 nstime_init(&nstc, 1);
190 nstime_add(&nsta, &nstc);
191 expect_u64_eq(nstime_divide(&nsta, &nstb), 10,
192 "Incorrect division result");
193
194 nstime_init2(&nsta, 42, 43);
195 nstime_copy(&nstb, &nsta);
196 nstime_imultiply(&nsta, 10);
197 nstime_init(&nstc, 1);
198 nstime_subtract(&nsta, &nstc);
199 expect_u64_eq(nstime_divide(&nsta, &nstb), 9,
200 "Incorrect division result");
201}
202TEST_END
203
204void
205test_nstime_since_once(nstime_t *t) {
206 nstime_t old_t;
207 nstime_copy(&old_t, t);
208
209 uint64_t ns_since = nstime_ns_since(t);
210 nstime_update(t);
211
212 nstime_t new_t;
213 nstime_copy(&new_t, t);
214 nstime_subtract(&new_t, &old_t);
215
216 expect_u64_ge(nstime_ns(&new_t), ns_since,
217 "Incorrect time since result");
218}
219
220TEST_BEGIN(test_nstime_ns_since) {
221 nstime_t t;
222
223 nstime_init_update(&t);
224 for (uint64_t i = 0; i < 10000; i++) {
225 /* Keeps updating t and verifies ns_since is valid. */
226 test_nstime_since_once(&t);
227 }
228}
229TEST_END
230
231TEST_BEGIN(test_nstime_monotonic) {
232 nstime_monotonic();
233}
234TEST_END
235
236int
237main(void) {
238 return test(
239 test_nstime_init,
240 test_nstime_init2,
241 test_nstime_copy,
242 test_nstime_compare,
243 test_nstime_add,
244 test_nstime_iadd,
245 test_nstime_subtract,
246 test_nstime_isubtract,
247 test_nstime_imultiply,
248 test_nstime_idivide,
249 test_nstime_divide,
250 test_nstime_ns_since,
251 test_nstime_monotonic);
252}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/oversize_threshold.c b/examples/redis-unstable/deps/jemalloc/test/unit/oversize_threshold.c
deleted file mode 100644
index 44a8f76..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/oversize_threshold.c
+++ /dev/null
@@ -1,133 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/ctl.h"
4
5static void
6arena_mallctl(const char *mallctl_str, unsigned arena, void *oldp,
7 size_t *oldlen, void *newp, size_t newlen) {
8 int err;
9 char buf[100];
10 malloc_snprintf(buf, sizeof(buf), mallctl_str, arena);
11
12 err = mallctl(buf, oldp, oldlen, newp, newlen);
13 expect_d_eq(0, err, "Mallctl failed; %s", buf);
14}
15
16TEST_BEGIN(test_oversize_threshold_get_set) {
17 int err;
18 size_t old_threshold;
19 size_t new_threshold;
20 size_t threshold_sz = sizeof(old_threshold);
21
22 unsigned arena;
23 size_t arena_sz = sizeof(arena);
24 err = mallctl("arenas.create", (void *)&arena, &arena_sz, NULL, 0);
25 expect_d_eq(0, err, "Arena creation failed");
26
27 /* Just a write. */
28 new_threshold = 1024 * 1024;
29 arena_mallctl("arena.%u.oversize_threshold", arena, NULL, NULL,
30 &new_threshold, threshold_sz);
31
32 /* Read and write */
33 new_threshold = 2 * 1024 * 1024;
34 arena_mallctl("arena.%u.oversize_threshold", arena, &old_threshold,
35 &threshold_sz, &new_threshold, threshold_sz);
36 expect_zu_eq(1024 * 1024, old_threshold, "Should have read old value");
37
38 /* Just a read */
39 arena_mallctl("arena.%u.oversize_threshold", arena, &old_threshold,
40 &threshold_sz, NULL, 0);
41 expect_zu_eq(2 * 1024 * 1024, old_threshold, "Should have read old value");
42}
43TEST_END
44
45static size_t max_purged = 0;
46static bool
47purge_forced_record_max(extent_hooks_t* hooks, void *addr, size_t sz,
48 size_t offset, size_t length, unsigned arena_ind) {
49 if (length > max_purged) {
50 max_purged = length;
51 }
52 return false;
53}
54
55static bool
56dalloc_record_max(extent_hooks_t *extent_hooks, void *addr, size_t sz,
57 bool comitted, unsigned arena_ind) {
58 if (sz > max_purged) {
59 max_purged = sz;
60 }
61 return false;
62}
63
64extent_hooks_t max_recording_extent_hooks;
65
66TEST_BEGIN(test_oversize_threshold) {
67 max_recording_extent_hooks = ehooks_default_extent_hooks;
68 max_recording_extent_hooks.purge_forced = &purge_forced_record_max;
69 max_recording_extent_hooks.dalloc = &dalloc_record_max;
70
71 extent_hooks_t *extent_hooks = &max_recording_extent_hooks;
72
73 int err;
74
75 unsigned arena;
76 size_t arena_sz = sizeof(arena);
77 err = mallctl("arenas.create", (void *)&arena, &arena_sz, NULL, 0);
78 expect_d_eq(0, err, "Arena creation failed");
79 arena_mallctl("arena.%u.extent_hooks", arena, NULL, NULL, &extent_hooks,
80 sizeof(extent_hooks));
81
82 /*
83 * This test will fundamentally race with purging, since we're going to
84 * check the dirty stats to see if our oversized allocation got purged.
85 * We don't want other purging to happen accidentally. We can't just
86 * disable purging entirely, though, since that will also disable
87 * oversize purging. Just set purging intervals to be very large.
88 */
89 ssize_t decay_ms = 100 * 1000;
90 ssize_t decay_ms_sz = sizeof(decay_ms);
91 arena_mallctl("arena.%u.dirty_decay_ms", arena, NULL, NULL, &decay_ms,
92 decay_ms_sz);
93 arena_mallctl("arena.%u.muzzy_decay_ms", arena, NULL, NULL, &decay_ms,
94 decay_ms_sz);
95
96 /* Clean everything out. */
97 arena_mallctl("arena.%u.purge", arena, NULL, NULL, NULL, 0);
98 max_purged = 0;
99
100 /* Set threshold to 1MB. */
101 size_t threshold = 1024 * 1024;
102 size_t threshold_sz = sizeof(threshold);
103 arena_mallctl("arena.%u.oversize_threshold", arena, NULL, NULL,
104 &threshold, threshold_sz);
105
106 /* Allocating and freeing half a megabyte should leave them dirty. */
107 void *ptr = mallocx(512 * 1024, MALLOCX_ARENA(arena));
108 dallocx(ptr, MALLOCX_TCACHE_NONE);
109 if (!is_background_thread_enabled()) {
110 expect_zu_lt(max_purged, 512 * 1024, "Expected no 512k purge");
111 }
112
113 /* Purge again to reset everything out. */
114 arena_mallctl("arena.%u.purge", arena, NULL, NULL, NULL, 0);
115 max_purged = 0;
116
117 /*
118 * Allocating and freeing 2 megabytes should have them purged because of
119 * the oversize threshold.
120 */
121 ptr = mallocx(2 * 1024 * 1024, MALLOCX_ARENA(arena));
122 dallocx(ptr, MALLOCX_TCACHE_NONE);
123 expect_zu_ge(max_purged, 2 * 1024 * 1024, "Expected a 2MB purge");
124}
125TEST_END
126
127int
128main(void) {
129 return test_no_reentrancy(
130 test_oversize_threshold_get_set,
131 test_oversize_threshold);
132}
133
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/pa.c b/examples/redis-unstable/deps/jemalloc/test/unit/pa.c
deleted file mode 100644
index b1e2f6e..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/pa.c
+++ /dev/null
@@ -1,126 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/pa.h"
4
5static void *
6alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
7 size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
8 void *ret = pages_map(new_addr, size, alignment, commit);
9 return ret;
10}
11
12static bool
13merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
14 void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
15 return !maps_coalesce;
16}
17
18static bool
19split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
20 size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
21 return !maps_coalesce;
22}
23
24static void
25init_test_extent_hooks(extent_hooks_t *hooks) {
26 /*
27 * The default hooks are mostly fine for testing. A few of them,
28 * though, access globals (alloc for dss setting in an arena, split and
29 * merge touch the global emap to find head state. The first of these
30 * can be fixed by keeping that state with the hooks, where it logically
31 * belongs. The second, though, we can only fix when we use the extent
32 * hook API.
33 */
34 memcpy(hooks, &ehooks_default_extent_hooks, sizeof(extent_hooks_t));
35 hooks->alloc = &alloc_hook;
36 hooks->merge = &merge_hook;
37 hooks->split = &split_hook;
38}
39
40typedef struct test_data_s test_data_t;
41struct test_data_s {
42 pa_shard_t shard;
43 pa_central_t central;
44 base_t *base;
45 emap_t emap;
46 pa_shard_stats_t stats;
47 malloc_mutex_t stats_mtx;
48 extent_hooks_t hooks;
49};
50
51test_data_t *init_test_data(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
52 test_data_t *test_data = calloc(1, sizeof(test_data_t));
53 assert_ptr_not_null(test_data, "");
54 init_test_extent_hooks(&test_data->hooks);
55
56 base_t *base = base_new(TSDN_NULL, /* ind */ 1, &test_data->hooks,
57 /* metadata_use_hooks */ true);
58 assert_ptr_not_null(base, "");
59
60 test_data->base = base;
61 bool err = emap_init(&test_data->emap, test_data->base,
62 /* zeroed */ true);
63 assert_false(err, "");
64
65 nstime_t time;
66 nstime_init(&time, 0);
67
68 err = pa_central_init(&test_data->central, base, opt_hpa,
69 &hpa_hooks_default);
70 assert_false(err, "");
71
72 const size_t pa_oversize_threshold = 8 * 1024 * 1024;
73 err = pa_shard_init(TSDN_NULL, &test_data->shard, &test_data->central,
74 &test_data->emap, test_data->base, /* ind */ 1, &test_data->stats,
75 &test_data->stats_mtx, &time, pa_oversize_threshold, dirty_decay_ms,
76 muzzy_decay_ms);
77 assert_false(err, "");
78
79 return test_data;
80}
81
82void destroy_test_data(test_data_t *data) {
83 base_delete(TSDN_NULL, data->base);
84 free(data);
85}
86
87static void *
88do_alloc_free_purge(void *arg) {
89 test_data_t *test_data = (test_data_t *)arg;
90 for (int i = 0; i < 10 * 1000; i++) {
91 bool deferred_work_generated = false;
92 edata_t *edata = pa_alloc(TSDN_NULL, &test_data->shard, PAGE,
93 PAGE, /* slab */ false, /* szind */ 0, /* zero */ false,
94 /* guarded */ false, &deferred_work_generated);
95 assert_ptr_not_null(edata, "");
96 pa_dalloc(TSDN_NULL, &test_data->shard, edata,
97 &deferred_work_generated);
98 malloc_mutex_lock(TSDN_NULL,
99 &test_data->shard.pac.decay_dirty.mtx);
100 pac_decay_all(TSDN_NULL, &test_data->shard.pac,
101 &test_data->shard.pac.decay_dirty,
102 &test_data->shard.pac.stats->decay_dirty,
103 &test_data->shard.pac.ecache_dirty, true);
104 malloc_mutex_unlock(TSDN_NULL,
105 &test_data->shard.pac.decay_dirty.mtx);
106 }
107 return NULL;
108}
109
110TEST_BEGIN(test_alloc_free_purge_thds) {
111 test_data_t *test_data = init_test_data(0, 0);
112 thd_t thds[4];
113 for (int i = 0; i < 4; i++) {
114 thd_create(&thds[i], do_alloc_free_purge, test_data);
115 }
116 for (int i = 0; i < 4; i++) {
117 thd_join(thds[i], NULL);
118 }
119}
120TEST_END
121
122int
123main(void) {
124 return test(
125 test_alloc_free_purge_thds);
126}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/pack.c b/examples/redis-unstable/deps/jemalloc/test/unit/pack.c
deleted file mode 100644
index e639282..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/pack.c
+++ /dev/null
@@ -1,166 +0,0 @@
1#include "test/jemalloc_test.h"
2
3/*
4 * Size class that is a divisor of the page size, ideally 4+ regions per run.
5 */
6#if LG_PAGE <= 14
7#define SZ (ZU(1) << (LG_PAGE - 2))
8#else
9#define SZ ZU(4096)
10#endif
11
12/*
13 * Number of slabs to consume at high water mark. Should be at least 2 so that
14 * if mmap()ed memory grows downward, downward growth of mmap()ed memory is
15 * tested.
16 */
17#define NSLABS 8
18
19static unsigned
20binind_compute(void) {
21 size_t sz;
22 unsigned nbins, i;
23
24 sz = sizeof(nbins);
25 expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
26 "Unexpected mallctl failure");
27
28 for (i = 0; i < nbins; i++) {
29 size_t mib[4];
30 size_t miblen = sizeof(mib)/sizeof(size_t);
31 size_t size;
32
33 expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
34 &miblen), 0, "Unexpected mallctlnametomb failure");
35 mib[2] = (size_t)i;
36
37 sz = sizeof(size);
38 expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
39 0), 0, "Unexpected mallctlbymib failure");
40 if (size == SZ) {
41 return i;
42 }
43 }
44
45 test_fail("Unable to compute nregs_per_run");
46 return 0;
47}
48
49static size_t
50nregs_per_run_compute(void) {
51 uint32_t nregs;
52 size_t sz;
53 unsigned binind = binind_compute();
54 size_t mib[4];
55 size_t miblen = sizeof(mib)/sizeof(size_t);
56
57 expect_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
58 "Unexpected mallctlnametomb failure");
59 mib[2] = (size_t)binind;
60 sz = sizeof(nregs);
61 expect_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
62 0), 0, "Unexpected mallctlbymib failure");
63 return nregs;
64}
65
66static unsigned
67arenas_create_mallctl(void) {
68 unsigned arena_ind;
69 size_t sz;
70
71 sz = sizeof(arena_ind);
72 expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
73 0, "Error in arenas.create");
74
75 return arena_ind;
76}
77
78static void
79arena_reset_mallctl(unsigned arena_ind) {
80 size_t mib[3];
81 size_t miblen = sizeof(mib)/sizeof(size_t);
82
83 expect_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
84 "Unexpected mallctlnametomib() failure");
85 mib[1] = (size_t)arena_ind;
86 expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
87 "Unexpected mallctlbymib() failure");
88}
89
90TEST_BEGIN(test_pack) {
91 bool prof_enabled;
92 size_t sz = sizeof(prof_enabled);
93 if (mallctl("opt.prof", (void *)&prof_enabled, &sz, NULL, 0) == 0) {
94 test_skip_if(prof_enabled);
95 }
96
97 unsigned arena_ind = arenas_create_mallctl();
98 size_t nregs_per_run = nregs_per_run_compute();
99 size_t nregs = nregs_per_run * NSLABS;
100 VARIABLE_ARRAY(void *, ptrs, nregs);
101 size_t i, j, offset;
102
103 /* Fill matrix. */
104 for (i = offset = 0; i < NSLABS; i++) {
105 for (j = 0; j < nregs_per_run; j++) {
106 void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
107 MALLOCX_TCACHE_NONE);
108 expect_ptr_not_null(p,
109 "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |"
110 " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu",
111 SZ, arena_ind, i, j);
112 ptrs[(i * nregs_per_run) + j] = p;
113 }
114 }
115
116 /*
117 * Free all but one region of each run, but rotate which region is
118 * preserved, so that subsequent allocations exercise the within-run
119 * layout policy.
120 */
121 offset = 0;
122 for (i = offset = 0;
123 i < NSLABS;
124 i++, offset = (offset + 1) % nregs_per_run) {
125 for (j = 0; j < nregs_per_run; j++) {
126 void *p = ptrs[(i * nregs_per_run) + j];
127 if (offset == j) {
128 continue;
129 }
130 dallocx(p, MALLOCX_ARENA(arena_ind) |
131 MALLOCX_TCACHE_NONE);
132 }
133 }
134
135 /*
136 * Logically refill matrix, skipping preserved regions and verifying
137 * that the matrix is unmodified.
138 */
139 offset = 0;
140 for (i = offset = 0;
141 i < NSLABS;
142 i++, offset = (offset + 1) % nregs_per_run) {
143 for (j = 0; j < nregs_per_run; j++) {
144 void *p;
145
146 if (offset == j) {
147 continue;
148 }
149 p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
150 MALLOCX_TCACHE_NONE);
151 expect_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
152 "Unexpected refill discrepancy, run=%zu, reg=%zu\n",
153 i, j);
154 }
155 }
156
157 /* Clean up. */
158 arena_reset_mallctl(arena_ind);
159}
160TEST_END
161
162int
163main(void) {
164 return test(
165 test_pack);
166}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/pack.sh b/examples/redis-unstable/deps/jemalloc/test/unit/pack.sh
deleted file mode 100644
index 6f45148..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/pack.sh
+++ /dev/null
@@ -1,4 +0,0 @@
1#!/bin/sh
2
3# Immediately purge to minimize fragmentation.
4export MALLOC_CONF="dirty_decay_ms:0,muzzy_decay_ms:0"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/pages.c b/examples/redis-unstable/deps/jemalloc/test/unit/pages.c
deleted file mode 100644
index 8dfd1a7..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/pages.c
+++ /dev/null
@@ -1,29 +0,0 @@
1#include "test/jemalloc_test.h"
2
3TEST_BEGIN(test_pages_huge) {
4 size_t alloc_size;
5 bool commit;
6 void *pages, *hugepage;
7
8 alloc_size = HUGEPAGE * 2 - PAGE;
9 commit = true;
10 pages = pages_map(NULL, alloc_size, PAGE, &commit);
11 expect_ptr_not_null(pages, "Unexpected pages_map() error");
12
13 if (init_system_thp_mode == thp_mode_default) {
14 hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE));
15 expect_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge,
16 "Unexpected pages_huge() result");
17 expect_false(pages_nohuge(hugepage, HUGEPAGE),
18 "Unexpected pages_nohuge() result");
19 }
20
21 pages_unmap(pages, alloc_size);
22}
23TEST_END
24
25int
26main(void) {
27 return test(
28 test_pages_huge);
29}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/peak.c b/examples/redis-unstable/deps/jemalloc/test/unit/peak.c
deleted file mode 100644
index 1112978..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/peak.c
+++ /dev/null
@@ -1,47 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/peak.h"
4
5TEST_BEGIN(test_peak) {
6 peak_t peak = PEAK_INITIALIZER;
7 expect_u64_eq(0, peak_max(&peak),
8 "Peak should be zero at initialization");
9 peak_update(&peak, 100, 50);
10 expect_u64_eq(50, peak_max(&peak),
11 "Missed update");
12 peak_update(&peak, 100, 100);
13 expect_u64_eq(50, peak_max(&peak), "Dallocs shouldn't change peak");
14 peak_update(&peak, 100, 200);
15 expect_u64_eq(50, peak_max(&peak), "Dallocs shouldn't change peak");
16 peak_update(&peak, 200, 200);
17 expect_u64_eq(50, peak_max(&peak), "Haven't reached peak again");
18 peak_update(&peak, 300, 200);
19 expect_u64_eq(100, peak_max(&peak), "Missed an update.");
20 peak_set_zero(&peak, 300, 200);
21 expect_u64_eq(0, peak_max(&peak), "No effect from zeroing");
22 peak_update(&peak, 300, 300);
23 expect_u64_eq(0, peak_max(&peak), "Dalloc shouldn't change peak");
24 peak_update(&peak, 400, 300);
25 expect_u64_eq(0, peak_max(&peak), "Should still be net negative");
26 peak_update(&peak, 500, 300);
27 expect_u64_eq(100, peak_max(&peak), "Missed an update.");
28 /*
29 * Above, we set to zero while a net allocator; let's try as a
30 * net-deallocator.
31 */
32 peak_set_zero(&peak, 600, 700);
33 expect_u64_eq(0, peak_max(&peak), "No effect from zeroing.");
34 peak_update(&peak, 600, 800);
35 expect_u64_eq(0, peak_max(&peak), "Dalloc shouldn't change peak.");
36 peak_update(&peak, 700, 800);
37 expect_u64_eq(0, peak_max(&peak), "Should still be net negative.");
38 peak_update(&peak, 800, 800);
39 expect_u64_eq(100, peak_max(&peak), "Missed an update.");
40}
41TEST_END
42
43int
44main(void) {
45 return test_no_reentrancy(
46 test_peak);
47}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/ph.c b/examples/redis-unstable/deps/jemalloc/test/unit/ph.c
deleted file mode 100644
index 28f5e48..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/ph.c
+++ /dev/null
@@ -1,330 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/ph.h"
4
5typedef struct node_s node_t;
6ph_structs(heap, node_t);
7
8struct node_s {
9#define NODE_MAGIC 0x9823af7e
10 uint32_t magic;
11 heap_link_t link;
12 uint64_t key;
13};
14
15static int
16node_cmp(const node_t *a, const node_t *b) {
17 int ret;
18
19 ret = (a->key > b->key) - (a->key < b->key);
20 if (ret == 0) {
21 /*
22 * Duplicates are not allowed in the heap, so force an
23 * arbitrary ordering for non-identical items with equal keys.
24 */
25 ret = (((uintptr_t)a) > ((uintptr_t)b))
26 - (((uintptr_t)a) < ((uintptr_t)b));
27 }
28 return ret;
29}
30
31static int
32node_cmp_magic(const node_t *a, const node_t *b) {
33
34 expect_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
35 expect_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
36
37 return node_cmp(a, b);
38}
39
40ph_gen(static, heap, node_t, link, node_cmp_magic);
41
42static node_t *
43node_next_get(const node_t *node) {
44 return phn_next_get((node_t *)node, offsetof(node_t, link));
45}
46
47static node_t *
48node_prev_get(const node_t *node) {
49 return phn_prev_get((node_t *)node, offsetof(node_t, link));
50}
51
52static node_t *
53node_lchild_get(const node_t *node) {
54 return phn_lchild_get((node_t *)node, offsetof(node_t, link));
55}
56
57static void
58node_print(const node_t *node, unsigned depth) {
59 unsigned i;
60 node_t *leftmost_child, *sibling;
61
62 for (i = 0; i < depth; i++) {
63 malloc_printf("\t");
64 }
65 malloc_printf("%2"FMTu64"\n", node->key);
66
67 leftmost_child = node_lchild_get(node);
68 if (leftmost_child == NULL) {
69 return;
70 }
71 node_print(leftmost_child, depth + 1);
72
73 for (sibling = node_next_get(leftmost_child); sibling !=
74 NULL; sibling = node_next_get(sibling)) {
75 node_print(sibling, depth + 1);
76 }
77}
78
79static void
80heap_print(const heap_t *heap) {
81 node_t *auxelm;
82
83 malloc_printf("vvv heap %p vvv\n", heap);
84 if (heap->ph.root == NULL) {
85 goto label_return;
86 }
87
88 node_print(heap->ph.root, 0);
89
90 for (auxelm = node_next_get(heap->ph.root); auxelm != NULL;
91 auxelm = node_next_get(auxelm)) {
92 expect_ptr_eq(node_next_get(node_prev_get(auxelm)), auxelm,
93 "auxelm's prev doesn't link to auxelm");
94 node_print(auxelm, 0);
95 }
96
97label_return:
98 malloc_printf("^^^ heap %p ^^^\n", heap);
99}
100
101static unsigned
102node_validate(const node_t *node, const node_t *parent) {
103 unsigned nnodes = 1;
104 node_t *leftmost_child, *sibling;
105
106 if (parent != NULL) {
107 expect_d_ge(node_cmp_magic(node, parent), 0,
108 "Child is less than parent");
109 }
110
111 leftmost_child = node_lchild_get(node);
112 if (leftmost_child == NULL) {
113 return nnodes;
114 }
115 expect_ptr_eq(node_prev_get(leftmost_child),
116 (void *)node, "Leftmost child does not link to node");
117 nnodes += node_validate(leftmost_child, node);
118
119 for (sibling = node_next_get(leftmost_child); sibling !=
120 NULL; sibling = node_next_get(sibling)) {
121 expect_ptr_eq(node_next_get(node_prev_get(sibling)), sibling,
122 "sibling's prev doesn't link to sibling");
123 nnodes += node_validate(sibling, node);
124 }
125 return nnodes;
126}
127
128static unsigned
129heap_validate(const heap_t *heap) {
130 unsigned nnodes = 0;
131 node_t *auxelm;
132
133 if (heap->ph.root == NULL) {
134 goto label_return;
135 }
136
137 nnodes += node_validate(heap->ph.root, NULL);
138
139 for (auxelm = node_next_get(heap->ph.root); auxelm != NULL;
140 auxelm = node_next_get(auxelm)) {
141 expect_ptr_eq(node_next_get(node_prev_get(auxelm)), auxelm,
142 "auxelm's prev doesn't link to auxelm");
143 nnodes += node_validate(auxelm, NULL);
144 }
145
146label_return:
147 if (false) {
148 heap_print(heap);
149 }
150 return nnodes;
151}
152
153TEST_BEGIN(test_ph_empty) {
154 heap_t heap;
155
156 heap_new(&heap);
157 expect_true(heap_empty(&heap), "Heap should be empty");
158 expect_ptr_null(heap_first(&heap), "Unexpected node");
159 expect_ptr_null(heap_any(&heap), "Unexpected node");
160}
161TEST_END
162
163static void
164node_remove(heap_t *heap, node_t *node) {
165 heap_remove(heap, node);
166
167 node->magic = 0;
168}
169
170static node_t *
171node_remove_first(heap_t *heap) {
172 node_t *node = heap_remove_first(heap);
173 node->magic = 0;
174 return node;
175}
176
177static node_t *
178node_remove_any(heap_t *heap) {
179 node_t *node = heap_remove_any(heap);
180 node->magic = 0;
181 return node;
182}
183
184TEST_BEGIN(test_ph_random) {
185#define NNODES 25
186#define NBAGS 250
187#define SEED 42
188 sfmt_t *sfmt;
189 uint64_t bag[NNODES];
190 heap_t heap;
191 node_t nodes[NNODES];
192 unsigned i, j, k;
193
194 sfmt = init_gen_rand(SEED);
195 for (i = 0; i < NBAGS; i++) {
196 switch (i) {
197 case 0:
198 /* Insert in order. */
199 for (j = 0; j < NNODES; j++) {
200 bag[j] = j;
201 }
202 break;
203 case 1:
204 /* Insert in reverse order. */
205 for (j = 0; j < NNODES; j++) {
206 bag[j] = NNODES - j - 1;
207 }
208 break;
209 default:
210 for (j = 0; j < NNODES; j++) {
211 bag[j] = gen_rand64_range(sfmt, NNODES);
212 }
213 }
214
215 for (j = 1; j <= NNODES; j++) {
216 /* Initialize heap and nodes. */
217 heap_new(&heap);
218 expect_u_eq(heap_validate(&heap), 0,
219 "Incorrect node count");
220 for (k = 0; k < j; k++) {
221 nodes[k].magic = NODE_MAGIC;
222 nodes[k].key = bag[k];
223 }
224
225 /* Insert nodes. */
226 for (k = 0; k < j; k++) {
227 heap_insert(&heap, &nodes[k]);
228 if (i % 13 == 12) {
229 expect_ptr_not_null(heap_any(&heap),
230 "Heap should not be empty");
231 /* Trigger merging. */
232 expect_ptr_not_null(heap_first(&heap),
233 "Heap should not be empty");
234 }
235 expect_u_eq(heap_validate(&heap), k + 1,
236 "Incorrect node count");
237 }
238
239 expect_false(heap_empty(&heap),
240 "Heap should not be empty");
241
242 /* Remove nodes. */
243 switch (i % 6) {
244 case 0:
245 for (k = 0; k < j; k++) {
246 expect_u_eq(heap_validate(&heap), j - k,
247 "Incorrect node count");
248 node_remove(&heap, &nodes[k]);
249 expect_u_eq(heap_validate(&heap), j - k
250 - 1, "Incorrect node count");
251 }
252 break;
253 case 1:
254 for (k = j; k > 0; k--) {
255 node_remove(&heap, &nodes[k-1]);
256 expect_u_eq(heap_validate(&heap), k - 1,
257 "Incorrect node count");
258 }
259 break;
260 case 2: {
261 node_t *prev = NULL;
262 for (k = 0; k < j; k++) {
263 node_t *node = node_remove_first(&heap);
264 expect_u_eq(heap_validate(&heap), j - k
265 - 1, "Incorrect node count");
266 if (prev != NULL) {
267 expect_d_ge(node_cmp(node,
268 prev), 0,
269 "Bad removal order");
270 }
271 prev = node;
272 }
273 break;
274 } case 3: {
275 node_t *prev = NULL;
276 for (k = 0; k < j; k++) {
277 node_t *node = heap_first(&heap);
278 expect_u_eq(heap_validate(&heap), j - k,
279 "Incorrect node count");
280 if (prev != NULL) {
281 expect_d_ge(node_cmp(node,
282 prev), 0,
283 "Bad removal order");
284 }
285 node_remove(&heap, node);
286 expect_u_eq(heap_validate(&heap), j - k
287 - 1, "Incorrect node count");
288 prev = node;
289 }
290 break;
291 } case 4: {
292 for (k = 0; k < j; k++) {
293 node_remove_any(&heap);
294 expect_u_eq(heap_validate(&heap), j - k
295 - 1, "Incorrect node count");
296 }
297 break;
298 } case 5: {
299 for (k = 0; k < j; k++) {
300 node_t *node = heap_any(&heap);
301 expect_u_eq(heap_validate(&heap), j - k,
302 "Incorrect node count");
303 node_remove(&heap, node);
304 expect_u_eq(heap_validate(&heap), j - k
305 - 1, "Incorrect node count");
306 }
307 break;
308 } default:
309 not_reached();
310 }
311
312 expect_ptr_null(heap_first(&heap),
313 "Heap should be empty");
314 expect_ptr_null(heap_any(&heap),
315 "Heap should be empty");
316 expect_true(heap_empty(&heap), "Heap should be empty");
317 }
318 }
319 fini_gen_rand(sfmt);
320#undef NNODES
321#undef SEED
322}
323TEST_END
324
325int
326main(void) {
327 return test(
328 test_ph_empty,
329 test_ph_random);
330}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prng.c b/examples/redis-unstable/deps/jemalloc/test/unit/prng.c
deleted file mode 100644
index a6d9b01..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prng.c
+++ /dev/null
@@ -1,189 +0,0 @@
1#include "test/jemalloc_test.h"
2
3TEST_BEGIN(test_prng_lg_range_u32) {
4 uint32_t sa, sb;
5 uint32_t ra, rb;
6 unsigned lg_range;
7
8 sa = 42;
9 ra = prng_lg_range_u32(&sa, 32);
10 sa = 42;
11 rb = prng_lg_range_u32(&sa, 32);
12 expect_u32_eq(ra, rb,
13 "Repeated generation should produce repeated results");
14
15 sb = 42;
16 rb = prng_lg_range_u32(&sb, 32);
17 expect_u32_eq(ra, rb,
18 "Equivalent generation should produce equivalent results");
19
20 sa = 42;
21 ra = prng_lg_range_u32(&sa, 32);
22 rb = prng_lg_range_u32(&sa, 32);
23 expect_u32_ne(ra, rb,
24 "Full-width results must not immediately repeat");
25
26 sa = 42;
27 ra = prng_lg_range_u32(&sa, 32);
28 for (lg_range = 31; lg_range > 0; lg_range--) {
29 sb = 42;
30 rb = prng_lg_range_u32(&sb, lg_range);
31 expect_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
32 0, "High order bits should be 0, lg_range=%u", lg_range);
33 expect_u32_eq(rb, (ra >> (32 - lg_range)),
34 "Expected high order bits of full-width result, "
35 "lg_range=%u", lg_range);
36 }
37
38}
39TEST_END
40
41TEST_BEGIN(test_prng_lg_range_u64) {
42 uint64_t sa, sb, ra, rb;
43 unsigned lg_range;
44
45 sa = 42;
46 ra = prng_lg_range_u64(&sa, 64);
47 sa = 42;
48 rb = prng_lg_range_u64(&sa, 64);
49 expect_u64_eq(ra, rb,
50 "Repeated generation should produce repeated results");
51
52 sb = 42;
53 rb = prng_lg_range_u64(&sb, 64);
54 expect_u64_eq(ra, rb,
55 "Equivalent generation should produce equivalent results");
56
57 sa = 42;
58 ra = prng_lg_range_u64(&sa, 64);
59 rb = prng_lg_range_u64(&sa, 64);
60 expect_u64_ne(ra, rb,
61 "Full-width results must not immediately repeat");
62
63 sa = 42;
64 ra = prng_lg_range_u64(&sa, 64);
65 for (lg_range = 63; lg_range > 0; lg_range--) {
66 sb = 42;
67 rb = prng_lg_range_u64(&sb, lg_range);
68 expect_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
69 0, "High order bits should be 0, lg_range=%u", lg_range);
70 expect_u64_eq(rb, (ra >> (64 - lg_range)),
71 "Expected high order bits of full-width result, "
72 "lg_range=%u", lg_range);
73 }
74}
75TEST_END
76
77TEST_BEGIN(test_prng_lg_range_zu) {
78 size_t sa, sb;
79 size_t ra, rb;
80 unsigned lg_range;
81
82 sa = 42;
83 ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
84 sa = 42;
85 rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
86 expect_zu_eq(ra, rb,
87 "Repeated generation should produce repeated results");
88
89 sb = 42;
90 rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR));
91 expect_zu_eq(ra, rb,
92 "Equivalent generation should produce equivalent results");
93
94 sa = 42;
95 ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
96 rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
97 expect_zu_ne(ra, rb,
98 "Full-width results must not immediately repeat");
99
100 sa = 42;
101 ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
102 for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0;
103 lg_range--) {
104 sb = 42;
105 rb = prng_lg_range_zu(&sb, lg_range);
106 expect_zu_eq((rb & (SIZE_T_MAX << lg_range)),
107 0, "High order bits should be 0, lg_range=%u", lg_range);
108 expect_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
109 lg_range)), "Expected high order bits of full-width "
110 "result, lg_range=%u", lg_range);
111 }
112
113}
114TEST_END
115
116TEST_BEGIN(test_prng_range_u32) {
117 uint32_t range;
118
119 const uint32_t max_range = 10000000;
120 const uint32_t range_step = 97;
121 const unsigned nreps = 10;
122
123 for (range = 2; range < max_range; range += range_step) {
124 uint32_t s;
125 unsigned rep;
126
127 s = range;
128 for (rep = 0; rep < nreps; rep++) {
129 uint32_t r = prng_range_u32(&s, range);
130
131 expect_u32_lt(r, range, "Out of range");
132 }
133 }
134}
135TEST_END
136
137TEST_BEGIN(test_prng_range_u64) {
138 uint64_t range;
139
140 const uint64_t max_range = 10000000;
141 const uint64_t range_step = 97;
142 const unsigned nreps = 10;
143
144 for (range = 2; range < max_range; range += range_step) {
145 uint64_t s;
146 unsigned rep;
147
148 s = range;
149 for (rep = 0; rep < nreps; rep++) {
150 uint64_t r = prng_range_u64(&s, range);
151
152 expect_u64_lt(r, range, "Out of range");
153 }
154 }
155}
156TEST_END
157
158TEST_BEGIN(test_prng_range_zu) {
159 size_t range;
160
161 const size_t max_range = 10000000;
162 const size_t range_step = 97;
163 const unsigned nreps = 10;
164
165
166 for (range = 2; range < max_range; range += range_step) {
167 size_t s;
168 unsigned rep;
169
170 s = range;
171 for (rep = 0; rep < nreps; rep++) {
172 size_t r = prng_range_zu(&s, range);
173
174 expect_zu_lt(r, range, "Out of range");
175 }
176 }
177}
178TEST_END
179
180int
181main(void) {
182 return test_no_reentrancy(
183 test_prng_lg_range_u32,
184 test_prng_lg_range_u64,
185 test_prng_lg_range_zu,
186 test_prng_range_u32,
187 test_prng_range_u64,
188 test_prng_range_zu);
189}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_accum.c b/examples/redis-unstable/deps/jemalloc/test/unit/prof_accum.c
deleted file mode 100644
index ef392ac..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_accum.c
+++ /dev/null
@@ -1,84 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/prof_data.h"
4#include "jemalloc/internal/prof_sys.h"
5
6#define NTHREADS 4
7#define NALLOCS_PER_THREAD 50
8#define DUMP_INTERVAL 1
9#define BT_COUNT_CHECK_INTERVAL 5
10
11static int
12prof_dump_open_file_intercept(const char *filename, int mode) {
13 int fd;
14
15 fd = open("/dev/null", O_WRONLY);
16 assert_d_ne(fd, -1, "Unexpected open() failure");
17
18 return fd;
19}
20
21static void *
22alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) {
23 return btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration);
24}
25
26static void *
27thd_start(void *varg) {
28 unsigned thd_ind = *(unsigned *)varg;
29 size_t bt_count_prev, bt_count;
30 unsigned i_prev, i;
31
32 i_prev = 0;
33 bt_count_prev = 0;
34 for (i = 0; i < NALLOCS_PER_THREAD; i++) {
35 void *p = alloc_from_permuted_backtrace(thd_ind, i);
36 dallocx(p, 0);
37 if (i % DUMP_INTERVAL == 0) {
38 expect_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
39 0, "Unexpected error while dumping heap profile");
40 }
41
42 if (i % BT_COUNT_CHECK_INTERVAL == 0 ||
43 i+1 == NALLOCS_PER_THREAD) {
44 bt_count = prof_bt_count();
45 expect_zu_le(bt_count_prev+(i-i_prev), bt_count,
46 "Expected larger backtrace count increase");
47 i_prev = i;
48 bt_count_prev = bt_count;
49 }
50 }
51
52 return NULL;
53}
54
55TEST_BEGIN(test_idump) {
56 bool active;
57 thd_t thds[NTHREADS];
58 unsigned thd_args[NTHREADS];
59 unsigned i;
60
61 test_skip_if(!config_prof);
62
63 active = true;
64 expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
65 sizeof(active)), 0,
66 "Unexpected mallctl failure while activating profiling");
67
68 prof_dump_open_file = prof_dump_open_file_intercept;
69
70 for (i = 0; i < NTHREADS; i++) {
71 thd_args[i] = i;
72 thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
73 }
74 for (i = 0; i < NTHREADS; i++) {
75 thd_join(thds[i], NULL);
76 }
77}
78TEST_END
79
80int
81main(void) {
82 return test_no_reentrancy(
83 test_idump);
84}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_accum.sh b/examples/redis-unstable/deps/jemalloc/test/unit/prof_accum.sh
deleted file mode 100644
index b3e13fc..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_accum.sh
+++ /dev/null
@@ -1,5 +0,0 @@
1#!/bin/sh
2
3if [ "x${enable_prof}" = "x1" ] ; then
4 export MALLOC_CONF="prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0"
5fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_active.c b/examples/redis-unstable/deps/jemalloc/test/unit/prof_active.c
deleted file mode 100644
index af29e7a..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_active.c
+++ /dev/null
@@ -1,119 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/prof_data.h"
4
5static void
6mallctl_bool_get(const char *name, bool expected, const char *func, int line) {
7 bool old;
8 size_t sz;
9
10 sz = sizeof(old);
11 expect_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0,
12 "%s():%d: Unexpected mallctl failure reading %s", func, line, name);
13 expect_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
14 name);
15}
16
17static void
18mallctl_bool_set(const char *name, bool old_expected, bool val_new,
19 const char *func, int line) {
20 bool old;
21 size_t sz;
22
23 sz = sizeof(old);
24 expect_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new,
25 sizeof(val_new)), 0,
26 "%s():%d: Unexpected mallctl failure reading/writing %s", func,
27 line, name);
28 expect_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
29 line, name);
30}
31
32static void
33mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func,
34 int line) {
35 mallctl_bool_get("prof.active", prof_active_old_expected, func, line);
36}
37#define mallctl_prof_active_get(a) \
38 mallctl_prof_active_get_impl(a, __func__, __LINE__)
39
40static void
41mallctl_prof_active_set_impl(bool prof_active_old_expected,
42 bool prof_active_new, const char *func, int line) {
43 mallctl_bool_set("prof.active", prof_active_old_expected,
44 prof_active_new, func, line);
45}
46#define mallctl_prof_active_set(a, b) \
47 mallctl_prof_active_set_impl(a, b, __func__, __LINE__)
48
49static void
50mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected,
51 const char *func, int line) {
52 mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected,
53 func, line);
54}
55#define mallctl_thread_prof_active_get(a) \
56 mallctl_thread_prof_active_get_impl(a, __func__, __LINE__)
57
58static void
59mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected,
60 bool thread_prof_active_new, const char *func, int line) {
61 mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected,
62 thread_prof_active_new, func, line);
63}
64#define mallctl_thread_prof_active_set(a, b) \
65 mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__)
66
67static void
68prof_sampling_probe_impl(bool expect_sample, const char *func, int line) {
69 void *p;
70 size_t expected_backtraces = expect_sample ? 1 : 0;
71
72 expect_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func,
73 line);
74 p = mallocx(1, 0);
75 expect_ptr_not_null(p, "Unexpected mallocx() failure");
76 expect_zu_eq(prof_bt_count(), expected_backtraces,
77 "%s():%d: Unexpected backtrace count", func, line);
78 dallocx(p, 0);
79}
80#define prof_sampling_probe(a) \
81 prof_sampling_probe_impl(a, __func__, __LINE__)
82
83TEST_BEGIN(test_prof_active) {
84 test_skip_if(!config_prof);
85
86 mallctl_prof_active_get(true);
87 mallctl_thread_prof_active_get(false);
88
89 mallctl_prof_active_set(true, true);
90 mallctl_thread_prof_active_set(false, false);
91 /* prof.active, !thread.prof.active. */
92 prof_sampling_probe(false);
93
94 mallctl_prof_active_set(true, false);
95 mallctl_thread_prof_active_set(false, false);
96 /* !prof.active, !thread.prof.active. */
97 prof_sampling_probe(false);
98
99 mallctl_prof_active_set(false, false);
100 mallctl_thread_prof_active_set(false, true);
101 /* !prof.active, thread.prof.active. */
102 prof_sampling_probe(false);
103
104 mallctl_prof_active_set(false, true);
105 mallctl_thread_prof_active_set(true, true);
106 /* prof.active, thread.prof.active. */
107 prof_sampling_probe(true);
108
109 /* Restore settings. */
110 mallctl_prof_active_set(true, true);
111 mallctl_thread_prof_active_set(true, false);
112}
113TEST_END
114
115int
116main(void) {
117 return test_no_reentrancy(
118 test_prof_active);
119}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_active.sh b/examples/redis-unstable/deps/jemalloc/test/unit/prof_active.sh
deleted file mode 100644
index 9749674..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_active.sh
+++ /dev/null
@@ -1,5 +0,0 @@
1#!/bin/sh
2
3if [ "x${enable_prof}" = "x1" ] ; then
4 export MALLOC_CONF="prof:true,prof_active:true,prof_thread_active_init:false,lg_prof_sample:0"
5fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_gdump.c b/examples/redis-unstable/deps/jemalloc/test/unit/prof_gdump.c
deleted file mode 100644
index 46e4503..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_gdump.c
+++ /dev/null
@@ -1,77 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/prof_sys.h"
4
5static bool did_prof_dump_open;
6
7static int
8prof_dump_open_file_intercept(const char *filename, int mode) {
9 int fd;
10
11 did_prof_dump_open = true;
12
13 fd = open("/dev/null", O_WRONLY);
14 assert_d_ne(fd, -1, "Unexpected open() failure");
15
16 return fd;
17}
18
19TEST_BEGIN(test_gdump) {
20 test_skip_if(opt_hpa);
21 bool active, gdump, gdump_old;
22 void *p, *q, *r, *s;
23 size_t sz;
24
25 test_skip_if(!config_prof);
26
27 active = true;
28 expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
29 sizeof(active)), 0,
30 "Unexpected mallctl failure while activating profiling");
31
32 prof_dump_open_file = prof_dump_open_file_intercept;
33
34 did_prof_dump_open = false;
35 p = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
36 expect_ptr_not_null(p, "Unexpected mallocx() failure");
37 expect_true(did_prof_dump_open, "Expected a profile dump");
38
39 did_prof_dump_open = false;
40 q = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
41 expect_ptr_not_null(q, "Unexpected mallocx() failure");
42 expect_true(did_prof_dump_open, "Expected a profile dump");
43
44 gdump = false;
45 sz = sizeof(gdump_old);
46 expect_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
47 (void *)&gdump, sizeof(gdump)), 0,
48 "Unexpected mallctl failure while disabling prof.gdump");
49 assert(gdump_old);
50 did_prof_dump_open = false;
51 r = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
52 expect_ptr_not_null(q, "Unexpected mallocx() failure");
53 expect_false(did_prof_dump_open, "Unexpected profile dump");
54
55 gdump = true;
56 sz = sizeof(gdump_old);
57 expect_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
58 (void *)&gdump, sizeof(gdump)), 0,
59 "Unexpected mallctl failure while enabling prof.gdump");
60 assert(!gdump_old);
61 did_prof_dump_open = false;
62 s = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
63 expect_ptr_not_null(q, "Unexpected mallocx() failure");
64 expect_true(did_prof_dump_open, "Expected a profile dump");
65
66 dallocx(p, 0);
67 dallocx(q, 0);
68 dallocx(r, 0);
69 dallocx(s, 0);
70}
71TEST_END
72
73int
74main(void) {
75 return test_no_reentrancy(
76 test_gdump);
77}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_gdump.sh b/examples/redis-unstable/deps/jemalloc/test/unit/prof_gdump.sh
deleted file mode 100644
index 3f600d2..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_gdump.sh
+++ /dev/null
@@ -1,6 +0,0 @@
1#!/bin/sh
2
3if [ "x${enable_prof}" = "x1" ] ; then
4 export MALLOC_CONF="prof:true,prof_active:false,prof_gdump:true"
5fi
6
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_hook.c b/examples/redis-unstable/deps/jemalloc/test/unit/prof_hook.c
deleted file mode 100644
index 6480d93..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_hook.c
+++ /dev/null
@@ -1,169 +0,0 @@
1#include "test/jemalloc_test.h"
2
3const char *dump_filename = "/dev/null";
4
5prof_backtrace_hook_t default_hook;
6
7bool mock_bt_hook_called = false;
8bool mock_dump_hook_called = false;
9
10void
11mock_bt_hook(void **vec, unsigned *len, unsigned max_len) {
12 *len = max_len;
13 for (unsigned i = 0; i < max_len; ++i) {
14 vec[i] = (void *)((uintptr_t)i);
15 }
16 mock_bt_hook_called = true;
17}
18
19void
20mock_bt_augmenting_hook(void **vec, unsigned *len, unsigned max_len) {
21 default_hook(vec, len, max_len);
22 expect_u_gt(*len, 0, "Default backtrace hook returned empty backtrace");
23 expect_u_lt(*len, max_len,
24 "Default backtrace hook returned too large backtrace");
25
26 /* Add a separator between default frames and augmented */
27 vec[*len] = (void *)0x030303030;
28 (*len)++;
29
30 /* Add more stack frames */
31 for (unsigned i = 0; i < 3; ++i) {
32 if (*len == max_len) {
33 break;
34 }
35 vec[*len] = (void *)((uintptr_t)i);
36 (*len)++;
37 }
38
39
40 mock_bt_hook_called = true;
41}
42
43void
44mock_dump_hook(const char *filename) {
45 mock_dump_hook_called = true;
46 expect_str_eq(filename, dump_filename,
47 "Incorrect file name passed to the dump hook");
48}
49
50TEST_BEGIN(test_prof_backtrace_hook_replace) {
51
52 test_skip_if(!config_prof);
53
54 mock_bt_hook_called = false;
55
56 void *p0 = mallocx(1, 0);
57 assert_ptr_not_null(p0, "Failed to allocate");
58
59 expect_false(mock_bt_hook_called, "Called mock hook before it's set");
60
61 prof_backtrace_hook_t null_hook = NULL;
62 expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
63 NULL, 0, (void *)&null_hook, sizeof(null_hook)),
64 EINVAL, "Incorrectly allowed NULL backtrace hook");
65
66 size_t default_hook_sz = sizeof(prof_backtrace_hook_t);
67 prof_backtrace_hook_t hook = &mock_bt_hook;
68 expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
69 (void *)&default_hook, &default_hook_sz, (void *)&hook,
70 sizeof(hook)), 0, "Unexpected mallctl failure setting hook");
71
72 void *p1 = mallocx(1, 0);
73 assert_ptr_not_null(p1, "Failed to allocate");
74
75 expect_true(mock_bt_hook_called, "Didn't call mock hook");
76
77 prof_backtrace_hook_t current_hook;
78 size_t current_hook_sz = sizeof(prof_backtrace_hook_t);
79 expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
80 (void *)&current_hook, &current_hook_sz, (void *)&default_hook,
81 sizeof(default_hook)), 0,
82 "Unexpected mallctl failure resetting hook to default");
83
84 expect_ptr_eq(current_hook, hook,
85 "Hook returned by mallctl is not equal to mock hook");
86
87 dallocx(p1, 0);
88 dallocx(p0, 0);
89}
90TEST_END
91
92TEST_BEGIN(test_prof_backtrace_hook_augment) {
93
94 test_skip_if(!config_prof);
95
96 mock_bt_hook_called = false;
97
98 void *p0 = mallocx(1, 0);
99 assert_ptr_not_null(p0, "Failed to allocate");
100
101 expect_false(mock_bt_hook_called, "Called mock hook before it's set");
102
103 size_t default_hook_sz = sizeof(prof_backtrace_hook_t);
104 prof_backtrace_hook_t hook = &mock_bt_augmenting_hook;
105 expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
106 (void *)&default_hook, &default_hook_sz, (void *)&hook,
107 sizeof(hook)), 0, "Unexpected mallctl failure setting hook");
108
109 void *p1 = mallocx(1, 0);
110 assert_ptr_not_null(p1, "Failed to allocate");
111
112 expect_true(mock_bt_hook_called, "Didn't call mock hook");
113
114 prof_backtrace_hook_t current_hook;
115 size_t current_hook_sz = sizeof(prof_backtrace_hook_t);
116 expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
117 (void *)&current_hook, &current_hook_sz, (void *)&default_hook,
118 sizeof(default_hook)), 0,
119 "Unexpected mallctl failure resetting hook to default");
120
121 expect_ptr_eq(current_hook, hook,
122 "Hook returned by mallctl is not equal to mock hook");
123
124 dallocx(p1, 0);
125 dallocx(p0, 0);
126}
127TEST_END
128
129TEST_BEGIN(test_prof_dump_hook) {
130
131 test_skip_if(!config_prof);
132
133 mock_dump_hook_called = false;
134
135 expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&dump_filename,
136 sizeof(dump_filename)), 0, "Failed to dump heap profile");
137
138 expect_false(mock_dump_hook_called, "Called dump hook before it's set");
139
140 size_t default_hook_sz = sizeof(prof_dump_hook_t);
141 prof_dump_hook_t hook = &mock_dump_hook;
142 expect_d_eq(mallctl("experimental.hooks.prof_dump",
143 (void *)&default_hook, &default_hook_sz, (void *)&hook,
144 sizeof(hook)), 0, "Unexpected mallctl failure setting hook");
145
146 expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&dump_filename,
147 sizeof(dump_filename)), 0, "Failed to dump heap profile");
148
149 expect_true(mock_dump_hook_called, "Didn't call mock hook");
150
151 prof_dump_hook_t current_hook;
152 size_t current_hook_sz = sizeof(prof_dump_hook_t);
153 expect_d_eq(mallctl("experimental.hooks.prof_dump",
154 (void *)&current_hook, &current_hook_sz, (void *)&default_hook,
155 sizeof(default_hook)), 0,
156 "Unexpected mallctl failure resetting hook to default");
157
158 expect_ptr_eq(current_hook, hook,
159 "Hook returned by mallctl is not equal to mock hook");
160}
161TEST_END
162
163int
164main(void) {
165 return test(
166 test_prof_backtrace_hook_replace,
167 test_prof_backtrace_hook_augment,
168 test_prof_dump_hook);
169}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_hook.sh b/examples/redis-unstable/deps/jemalloc/test/unit/prof_hook.sh
deleted file mode 100644
index c7ebd8f..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_hook.sh
+++ /dev/null
@@ -1,6 +0,0 @@
1#!/bin/sh
2
3if [ "x${enable_prof}" = "x1" ] ; then
4 export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
5fi
6
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_idump.c b/examples/redis-unstable/deps/jemalloc/test/unit/prof_idump.c
deleted file mode 100644
index 455ac52..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_idump.c
+++ /dev/null
@@ -1,57 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/prof_sys.h"
4
5#define TEST_PREFIX "test_prefix"
6
7static bool did_prof_dump_open;
8
9static int
10prof_dump_open_file_intercept(const char *filename, int mode) {
11 int fd;
12
13 did_prof_dump_open = true;
14
15 const char filename_prefix[] = TEST_PREFIX ".";
16 expect_d_eq(strncmp(filename_prefix, filename, sizeof(filename_prefix)
17 - 1), 0, "Dump file name should start with \"" TEST_PREFIX ".\"");
18
19 fd = open("/dev/null", O_WRONLY);
20 assert_d_ne(fd, -1, "Unexpected open() failure");
21
22 return fd;
23}
24
25TEST_BEGIN(test_idump) {
26 bool active;
27 void *p;
28
29 const char *test_prefix = TEST_PREFIX;
30
31 test_skip_if(!config_prof);
32
33 active = true;
34
35 expect_d_eq(mallctl("prof.prefix", NULL, NULL, (void *)&test_prefix,
36 sizeof(test_prefix)), 0,
37 "Unexpected mallctl failure while overwriting dump prefix");
38
39 expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
40 sizeof(active)), 0,
41 "Unexpected mallctl failure while activating profiling");
42
43 prof_dump_open_file = prof_dump_open_file_intercept;
44
45 did_prof_dump_open = false;
46 p = mallocx(1, 0);
47 expect_ptr_not_null(p, "Unexpected mallocx() failure");
48 dallocx(p, 0);
49 expect_true(did_prof_dump_open, "Expected a profile dump");
50}
51TEST_END
52
53int
54main(void) {
55 return test(
56 test_idump);
57}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_idump.sh b/examples/redis-unstable/deps/jemalloc/test/unit/prof_idump.sh
deleted file mode 100644
index 4dc599a..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_idump.sh
+++ /dev/null
@@ -1,8 +0,0 @@
1#!/bin/sh
2
3export MALLOC_CONF="tcache:false"
4if [ "x${enable_prof}" = "x1" ] ; then
5 export MALLOC_CONF="${MALLOC_CONF},prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,lg_prof_interval:0"
6fi
7
8
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_log.c b/examples/redis-unstable/deps/jemalloc/test/unit/prof_log.c
deleted file mode 100644
index 5ff208e..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_log.c
+++ /dev/null
@@ -1,151 +0,0 @@
1#include "test/jemalloc_test.h"
2#include "jemalloc/internal/prof_log.h"
3
4#define N_PARAM 100
5#define N_THREADS 10
6
7static void expect_rep() {
8 expect_b_eq(prof_log_rep_check(), false, "Rep check failed");
9}
10
11static void expect_log_empty() {
12 expect_zu_eq(prof_log_bt_count(), 0,
13 "The log has backtraces; it isn't empty");
14 expect_zu_eq(prof_log_thr_count(), 0,
15 "The log has threads; it isn't empty");
16 expect_zu_eq(prof_log_alloc_count(), 0,
17 "The log has allocations; it isn't empty");
18}
19
20void *buf[N_PARAM];
21
22static void f() {
23 int i;
24 for (i = 0; i < N_PARAM; i++) {
25 buf[i] = malloc(100);
26 }
27 for (i = 0; i < N_PARAM; i++) {
28 free(buf[i]);
29 }
30}
31
32TEST_BEGIN(test_prof_log_many_logs) {
33 int i;
34
35 test_skip_if(!config_prof);
36
37 for (i = 0; i < N_PARAM; i++) {
38 expect_b_eq(prof_log_is_logging(), false,
39 "Logging shouldn't have started yet");
40 expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
41 "Unexpected mallctl failure when starting logging");
42 expect_b_eq(prof_log_is_logging(), true,
43 "Logging should be started by now");
44 expect_log_empty();
45 expect_rep();
46 f();
47 expect_zu_eq(prof_log_thr_count(), 1, "Wrong thread count");
48 expect_rep();
49 expect_b_eq(prof_log_is_logging(), true,
50 "Logging should still be on");
51 expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
52 "Unexpected mallctl failure when stopping logging");
53 expect_b_eq(prof_log_is_logging(), false,
54 "Logging should have turned off");
55 }
56}
57TEST_END
58
59thd_t thr_buf[N_THREADS];
60
61static void *f_thread(void *unused) {
62 int i;
63 for (i = 0; i < N_PARAM; i++) {
64 void *p = malloc(100);
65 memset(p, 100, 1);
66 free(p);
67 }
68
69 return NULL;
70}
71
72TEST_BEGIN(test_prof_log_many_threads) {
73
74 test_skip_if(!config_prof);
75
76 int i;
77 expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
78 "Unexpected mallctl failure when starting logging");
79 for (i = 0; i < N_THREADS; i++) {
80 thd_create(&thr_buf[i], &f_thread, NULL);
81 }
82
83 for (i = 0; i < N_THREADS; i++) {
84 thd_join(thr_buf[i], NULL);
85 }
86 expect_zu_eq(prof_log_thr_count(), N_THREADS,
87 "Wrong number of thread entries");
88 expect_rep();
89 expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
90 "Unexpected mallctl failure when stopping logging");
91}
92TEST_END
93
94static void f3() {
95 void *p = malloc(100);
96 free(p);
97}
98
99static void f1() {
100 void *p = malloc(100);
101 f3();
102 free(p);
103}
104
105static void f2() {
106 void *p = malloc(100);
107 free(p);
108}
109
110TEST_BEGIN(test_prof_log_many_traces) {
111
112 test_skip_if(!config_prof);
113
114 expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
115 "Unexpected mallctl failure when starting logging");
116 int i;
117 expect_rep();
118 expect_log_empty();
119 for (i = 0; i < N_PARAM; i++) {
120 expect_rep();
121 f1();
122 expect_rep();
123 f2();
124 expect_rep();
125 f3();
126 expect_rep();
127 }
128 /*
129 * There should be 8 total backtraces: two for malloc/free in f1(), two
130 * for malloc/free in f2(), two for malloc/free in f3(), and then two
131 * for malloc/free in f1()'s call to f3(). However compiler
132 * optimizations such as loop unrolling might generate more call sites.
133 * So >= 8 traces are expected.
134 */
135 expect_zu_ge(prof_log_bt_count(), 8,
136 "Expect at least 8 backtraces given sample workload");
137 expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
138 "Unexpected mallctl failure when stopping logging");
139}
140TEST_END
141
142int
143main(void) {
144 if (config_prof) {
145 prof_log_dummy_set(true);
146 }
147 return test_no_reentrancy(
148 test_prof_log_many_logs,
149 test_prof_log_many_traces,
150 test_prof_log_many_threads);
151}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_log.sh b/examples/redis-unstable/deps/jemalloc/test/unit/prof_log.sh
deleted file mode 100644
index 485f9bf..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_log.sh
+++ /dev/null
@@ -1,5 +0,0 @@
1#!/bin/sh
2
3if [ "x${enable_prof}" = "x1" ] ; then
4 export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
5fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_mdump.c b/examples/redis-unstable/deps/jemalloc/test/unit/prof_mdump.c
deleted file mode 100644
index 75b3a51..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_mdump.c
+++ /dev/null
@@ -1,216 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/prof_sys.h"
4
5static const char *test_filename = "test_filename";
6static bool did_prof_dump_open;
7
8static int
9prof_dump_open_file_intercept(const char *filename, int mode) {
10 int fd;
11
12 did_prof_dump_open = true;
13
14 /*
15 * Stronger than a strcmp() - verifying that we internally directly use
16 * the caller supplied char pointer.
17 */
18 expect_ptr_eq(filename, test_filename,
19 "Dump file name should be \"%s\"", test_filename);
20
21 fd = open("/dev/null", O_WRONLY);
22 assert_d_ne(fd, -1, "Unexpected open() failure");
23
24 return fd;
25}
26
27TEST_BEGIN(test_mdump_normal) {
28 test_skip_if(!config_prof);
29
30 prof_dump_open_file_t *open_file_orig = prof_dump_open_file;
31
32 void *p = mallocx(1, 0);
33 assert_ptr_not_null(p, "Unexpected mallocx() failure");
34
35 prof_dump_open_file = prof_dump_open_file_intercept;
36 did_prof_dump_open = false;
37 expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename,
38 sizeof(test_filename)), 0,
39 "Unexpected mallctl failure while dumping");
40 expect_true(did_prof_dump_open, "Expected a profile dump");
41
42 dallocx(p, 0);
43
44 prof_dump_open_file = open_file_orig;
45}
46TEST_END
47
48static int
49prof_dump_open_file_error(const char *filename, int mode) {
50 return -1;
51}
52
53/*
54 * In the context of test_mdump_output_error, prof_dump_write_file_count is the
55 * total number of times prof_dump_write_file_error() is expected to be called.
56 * In the context of test_mdump_maps_error, prof_dump_write_file_count is the
57 * total number of times prof_dump_write_file_error() is expected to be called
58 * starting from the one that contains an 'M' (beginning the "MAPPED_LIBRARIES"
59 * header).
60 */
61static int prof_dump_write_file_count;
62
63static ssize_t
64prof_dump_write_file_error(int fd, const void *s, size_t len) {
65 --prof_dump_write_file_count;
66
67 expect_d_ge(prof_dump_write_file_count, 0,
68 "Write is called after error occurs");
69
70 if (prof_dump_write_file_count == 0) {
71 return -1;
72 } else {
73 /*
74 * Any non-negative number indicates success, and for
75 * simplicity we just use 0. When prof_dump_write_file_count
76 * is positive, it means that we haven't reached the write that
77 * we want to fail; when prof_dump_write_file_count is
78 * negative, it means that we've already violated the
79 * expect_d_ge(prof_dump_write_file_count, 0) statement above,
80 * but instead of aborting, we continue the rest of the test,
81 * and we indicate that all the writes after the failed write
82 * are successful.
83 */
84 return 0;
85 }
86}
87
88static void
89expect_write_failure(int count) {
90 prof_dump_write_file_count = count;
91 expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename,
92 sizeof(test_filename)), EFAULT, "Dump should err");
93 expect_d_eq(prof_dump_write_file_count, 0,
94 "Dumping stopped after a wrong number of writes");
95}
96
97TEST_BEGIN(test_mdump_output_error) {
98 test_skip_if(!config_prof);
99 test_skip_if(!config_debug);
100
101 prof_dump_open_file_t *open_file_orig = prof_dump_open_file;
102 prof_dump_write_file_t *write_file_orig = prof_dump_write_file;
103
104 prof_dump_write_file = prof_dump_write_file_error;
105
106 void *p = mallocx(1, 0);
107 assert_ptr_not_null(p, "Unexpected mallocx() failure");
108
109 /*
110 * When opening the dump file fails, there shouldn't be any write, and
111 * mallctl() should return failure.
112 */
113 prof_dump_open_file = prof_dump_open_file_error;
114 expect_write_failure(0);
115
116 /*
117 * When the n-th write fails, there shouldn't be any more write, and
118 * mallctl() should return failure.
119 */
120 prof_dump_open_file = prof_dump_open_file_intercept;
121 expect_write_failure(1); /* First write fails. */
122 expect_write_failure(2); /* Second write fails. */
123
124 dallocx(p, 0);
125
126 prof_dump_open_file = open_file_orig;
127 prof_dump_write_file = write_file_orig;
128}
129TEST_END
130
131static int
132prof_dump_open_maps_error() {
133 return -1;
134}
135
136static bool started_piping_maps_file;
137
138static ssize_t
139prof_dump_write_maps_file_error(int fd, const void *s, size_t len) {
140 /* The main dump doesn't contain any capital 'M'. */
141 if (!started_piping_maps_file && strchr(s, 'M') != NULL) {
142 started_piping_maps_file = true;
143 }
144
145 if (started_piping_maps_file) {
146 return prof_dump_write_file_error(fd, s, len);
147 } else {
148 /* Return success when we haven't started piping maps. */
149 return 0;
150 }
151}
152
153static void
154expect_maps_write_failure(int count) {
155 int mfd = prof_dump_open_maps();
156 if (mfd == -1) {
157 /* No need to continue if we just can't find the maps file. */
158 return;
159 }
160 close(mfd);
161 started_piping_maps_file = false;
162 expect_write_failure(count);
163 expect_true(started_piping_maps_file, "Should start piping maps");
164}
165
166TEST_BEGIN(test_mdump_maps_error) {
167 test_skip_if(!config_prof);
168 test_skip_if(!config_debug);
169
170 prof_dump_open_file_t *open_file_orig = prof_dump_open_file;
171 prof_dump_write_file_t *write_file_orig = prof_dump_write_file;
172 prof_dump_open_maps_t *open_maps_orig = prof_dump_open_maps;
173
174 prof_dump_open_file = prof_dump_open_file_intercept;
175 prof_dump_write_file = prof_dump_write_maps_file_error;
176
177 void *p = mallocx(1, 0);
178 assert_ptr_not_null(p, "Unexpected mallocx() failure");
179
180 /*
181 * When opening the maps file fails, there shouldn't be any maps write,
182 * and mallctl() should return success.
183 */
184 prof_dump_open_maps = prof_dump_open_maps_error;
185 started_piping_maps_file = false;
186 prof_dump_write_file_count = 0;
187 expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename,
188 sizeof(test_filename)), 0,
189 "mallctl should not fail in case of maps file opening failure");
190 expect_false(started_piping_maps_file, "Shouldn't start piping maps");
191 expect_d_eq(prof_dump_write_file_count, 0,
192 "Dumping stopped after a wrong number of writes");
193
194 /*
195 * When the n-th maps write fails (given that we are able to find the
196 * maps file), there shouldn't be any more maps write, and mallctl()
197 * should return failure.
198 */
199 prof_dump_open_maps = open_maps_orig;
200 expect_maps_write_failure(1); /* First write fails. */
201 expect_maps_write_failure(2); /* Second write fails. */
202
203 dallocx(p, 0);
204
205 prof_dump_open_file = open_file_orig;
206 prof_dump_write_file = write_file_orig;
207}
208TEST_END
209
210int
211main(void) {
212 return test(
213 test_mdump_normal,
214 test_mdump_output_error,
215 test_mdump_maps_error);
216}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_mdump.sh b/examples/redis-unstable/deps/jemalloc/test/unit/prof_mdump.sh
deleted file mode 100644
index d14cb8c..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_mdump.sh
+++ /dev/null
@@ -1,6 +0,0 @@
#!/bin/sh

# Enable heap profiling and sample every allocation (lg_prof_sample:0),
# but only when jemalloc was built with profiling support; enable_prof is
# provided by the test harness configuration. Without it, the test binary
# skips itself at runtime.
if [ "x${enable_prof}" = "x1" ] ; then
  export MALLOC_CONF="prof:true,lg_prof_sample:0"
fi

diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_recent.c b/examples/redis-unstable/deps/jemalloc/test/unit/prof_recent.c
deleted file mode 100644
index 4fb3723..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_recent.c
+++ /dev/null
@@ -1,678 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/prof_recent.h"
4
5/* As specified in the shell script */
6#define OPT_ALLOC_MAX 3
7
8/* Invariant before and after every test (when config_prof is on) */
9static void
10confirm_prof_setup() {
11 /* Options */
12 assert_true(opt_prof, "opt_prof not on");
13 assert_true(opt_prof_active, "opt_prof_active not on");
14 assert_zd_eq(opt_prof_recent_alloc_max, OPT_ALLOC_MAX,
15 "opt_prof_recent_alloc_max not set correctly");
16
17 /* Dynamics */
18 assert_true(prof_active_state, "prof_active not on");
19 assert_zd_eq(prof_recent_alloc_max_ctl_read(), OPT_ALLOC_MAX,
20 "prof_recent_alloc_max not set correctly");
21}
22
/* Sanity check: the setup invariants hold before any other test runs. */
TEST_BEGIN(test_confirm_setup) {
	test_skip_if(!config_prof);
	confirm_prof_setup();
}
TEST_END
28
/*
 * With profiling compiled out, every experimental.prof_recent mallctl
 * must fail with ENOENT and leave all in/out arguments untouched.
 */
TEST_BEGIN(test_prof_recent_off) {
	test_skip_if(config_prof);

	const ssize_t past_ref = 0, future_ref = 0;
	const size_t len_ref = sizeof(ssize_t);

	ssize_t past = past_ref, future = future_ref;
	size_t len = len_ref;

/* Checks both the error code and that no argument was modified. */
#define ASSERT_SHOULD_FAIL(opt, a, b, c, d) do {			\
	assert_d_eq(mallctl("experimental.prof_recent." opt, a, b, c,	\
	    d), ENOENT, "Should return ENOENT when config_prof is off");\
	assert_zd_eq(past, past_ref, "output was touched");		\
	assert_zu_eq(len, len_ref, "output length was touched");	\
	assert_zd_eq(future, future_ref, "input was touched");		\
} while (0)

	ASSERT_SHOULD_FAIL("alloc_max", NULL, NULL, NULL, 0);
	ASSERT_SHOULD_FAIL("alloc_max", &past, &len, NULL, 0);
	ASSERT_SHOULD_FAIL("alloc_max", NULL, NULL, &future, len);
	ASSERT_SHOULD_FAIL("alloc_max", &past, &len, &future, len);

#undef ASSERT_SHOULD_FAIL
}
TEST_END
54
/*
 * Exercises reading and writing experimental.prof_recent.alloc_max via
 * mallctl: no-op calls, plain reads, writes, combined read+write,
 * rejection of values below -1, and rejection of a wrong input length.
 */
TEST_BEGIN(test_prof_recent_on) {
	test_skip_if(!config_prof);

	ssize_t past, future;
	size_t len = sizeof(ssize_t);

	confirm_prof_setup();

	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, NULL, 0), 0, "no-op mallctl should be allowed");
	confirm_prof_setup();

	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, NULL, 0), 0, "Read error");
	expect_zd_eq(past, OPT_ALLOC_MAX, "Wrong read result");
	future = OPT_ALLOC_MAX + 1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, len), 0, "Write error");
	/* -1 denotes "unlimited" and is a valid write. */
	future = -1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, &future, len), 0, "Read/write error");
	expect_zd_eq(past, OPT_ALLOC_MAX + 1, "Wrong read result");
	/* Values below -1 are invalid. */
	future = -2;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, &future, len), EINVAL,
	    "Invalid write should return EINVAL");
	expect_zd_eq(past, OPT_ALLOC_MAX + 1,
	    "Output should not be touched given invalid write");
	future = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, &future, len), 0, "Read/write error");
	expect_zd_eq(past, -1, "Wrong read result");
	/* A mismatched input length must also be rejected. */
	future = OPT_ALLOC_MAX + 2;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, &future, len * 2), EINVAL,
	    "Invalid write should return EINVAL");
	expect_zd_eq(past, -1,
	    "Output should not be touched given invalid write");

	confirm_prof_setup();
}
TEST_END
97
98/* Reproducible sequence of request sizes */
99#define NTH_REQ_SIZE(n) ((n) * 97 + 101)
100
/*
 * Verifies that live pointer p was just recorded: its edata must link to
 * a recent-alloc record with a non-NULL alloc_tctx and no dalloc_tctx,
 * and that record must point back at the same edata. The hard asserts
 * guard the pointer dereferences that follow them.
 */
static void
confirm_malloc(void *p) {
	assert_ptr_not_null(p, "malloc failed unexpectedly");
	edata_t *e = emap_edata_lookup(TSDN_NULL, &arena_emap_global, p);
	assert_ptr_not_null(e, "NULL edata for living pointer");
	prof_recent_t *n = edata_prof_recent_alloc_get_no_lock_test(e);
	assert_ptr_not_null(n, "Record in edata should not be NULL");
	expect_ptr_not_null(n->alloc_tctx,
	    "alloc_tctx in record should not be NULL");
	expect_ptr_eq(e, prof_recent_alloc_edata_get_no_lock_test(n),
	    "edata pointer in record is not correct");
	expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
}
114
115static void
116confirm_record_size(prof_recent_t *n, unsigned kth) {
117 expect_zu_eq(n->size, NTH_REQ_SIZE(kth),
118 "Recorded allocation size is wrong");
119}
120
/*
 * A record for a still-live allocation must keep its alloc_tctx, link to
 * a valid edata (which links back to the record), and have no dalloc_tctx
 * yet. The hard assert guards the edata dereference that follows.
 */
static void
confirm_record_living(prof_recent_t *n) {
	expect_ptr_not_null(n->alloc_tctx,
	    "alloc_tctx in record should not be NULL");
	edata_t *edata = prof_recent_alloc_edata_get_no_lock_test(n);
	assert_ptr_not_null(edata,
	    "Recorded edata should not be NULL for living pointer");
	expect_ptr_eq(n, edata_prof_recent_alloc_get_no_lock_test(edata),
	    "Record in edata is not correct");
	expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
}
132
/*
 * A record for a freed allocation must keep its alloc_tctx, have no
 * edata link any more, and carry a dalloc_tctx describing the free.
 */
static void
confirm_record_released(prof_recent_t *n) {
	expect_ptr_not_null(n->alloc_tctx,
	    "alloc_tctx in record should not be NULL");
	expect_ptr_null(prof_recent_alloc_edata_get_no_lock_test(n),
	    "Recorded edata should be NULL for released pointer");
	expect_ptr_not_null(n->dalloc_tctx,
	    "dalloc_tctx in record should not be NULL for released pointer");
}
142
/*
 * Core behavioral test of the last-N recent-allocation list: records are
 * kept in allocation order, capped at the configured limit, frozen while
 * prof_active is off, and trimmed (oldest first) when the limit shrinks.
 * Finishes by restoring the limit to OPT_ALLOC_MAX.
 */
TEST_BEGIN(test_prof_recent_alloc) {
	test_skip_if(!config_prof);

	bool b;
	unsigned i, c;
	size_t req_size;
	void *p;
	prof_recent_t *n;
	ssize_t future;

	confirm_prof_setup();

	/*
	 * First batch of 2 * OPT_ALLOC_MAX allocations. After the
	 * (OPT_ALLOC_MAX - 1)'th allocation the recorded allocations should
	 * always be the last OPT_ALLOC_MAX allocations coming from here.
	 */
	for (i = 0; i < 2 * OPT_ALLOC_MAX; ++i) {
		req_size = NTH_REQ_SIZE(i);
		p = malloc(req_size);
		confirm_malloc(p);
		if (i < OPT_ALLOC_MAX - 1) {
			assert_false(ql_empty(&prof_recent_alloc_list),
			    "Empty recent allocation");
			free(p);
			/*
			 * The recorded allocations may still include some
			 * other allocations before the test run started,
			 * so keep allocating without checking anything.
			 */
			continue;
		}
		c = 0;
		ql_foreach(n, &prof_recent_alloc_list, link) {
			++c;
			confirm_record_size(n, i + c - OPT_ALLOC_MAX);
			/* Only the newest record is still live here. */
			if (c == OPT_ALLOC_MAX) {
				confirm_record_living(n);
			} else {
				confirm_record_released(n);
			}
		}
		assert_u_eq(c, OPT_ALLOC_MAX,
		    "Incorrect total number of allocations");
		free(p);
	}

	confirm_prof_setup();

	b = false;
	assert_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
	    "mallctl for turning off prof_active failed");

	/*
	 * Second batch of OPT_ALLOC_MAX allocations. Since prof_active is
	 * turned off, this batch shouldn't be recorded.
	 */
	for (; i < 3 * OPT_ALLOC_MAX; ++i) {
		req_size = NTH_REQ_SIZE(i);
		p = malloc(req_size);
		assert_ptr_not_null(p, "malloc failed unexpectedly");
		c = 0;
		ql_foreach(n, &prof_recent_alloc_list, link) {
			confirm_record_size(n, c + OPT_ALLOC_MAX);
			confirm_record_released(n);
			++c;
		}
		assert_u_eq(c, OPT_ALLOC_MAX,
		    "Incorrect total number of allocations");
		free(p);
	}

	b = true;
	assert_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
	    "mallctl for turning on prof_active failed");

	confirm_prof_setup();

	/*
	 * Third batch of OPT_ALLOC_MAX allocations. Since prof_active is
	 * turned back on, they should be recorded, and in the list of recorded
	 * allocations they should follow the first batch rather than the
	 * second batch.
	 */
	for (; i < 4 * OPT_ALLOC_MAX; ++i) {
		req_size = NTH_REQ_SIZE(i);
		p = malloc(req_size);
		confirm_malloc(p);
		c = 0;
		ql_foreach(n, &prof_recent_alloc_list, link) {
			++c;
			confirm_record_size(n,
			    /* Is the allocation from the third batch? */
			    i + c - OPT_ALLOC_MAX >= 3 * OPT_ALLOC_MAX ?
			    /* If yes, then it's just recorded. */
			    i + c - OPT_ALLOC_MAX :
			    /*
			     * Otherwise, it should come from the first batch
			     * instead of the second batch.
			     */
			    i + c - 2 * OPT_ALLOC_MAX);
			if (c == OPT_ALLOC_MAX) {
				confirm_record_living(n);
			} else {
				confirm_record_released(n);
			}
		}
		assert_u_eq(c, OPT_ALLOC_MAX,
		    "Incorrect total number of allocations");
		free(p);
	}

	/* Increasing the limit shouldn't alter the list of records. */
	future = OPT_ALLOC_MAX + 1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	c = 0;
	ql_foreach(n, &prof_recent_alloc_list, link) {
		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
		confirm_record_released(n);
		++c;
	}
	assert_u_eq(c, OPT_ALLOC_MAX,
	    "Incorrect total number of allocations");

	/*
	 * Decreasing the limit shouldn't alter the list of records as long as
	 * the new limit is still no less than the length of the list.
	 */
	future = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	c = 0;
	ql_foreach(n, &prof_recent_alloc_list, link) {
		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
		confirm_record_released(n);
		++c;
	}
	assert_u_eq(c, OPT_ALLOC_MAX,
	    "Incorrect total number of allocations");

	/*
	 * Decreasing the limit should shorten the list of records if the new
	 * limit is less than the length of the list.
	 */
	future = OPT_ALLOC_MAX - 1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	c = 0;
	ql_foreach(n, &prof_recent_alloc_list, link) {
		++c;
		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
		confirm_record_released(n);
	}
	assert_u_eq(c, OPT_ALLOC_MAX - 1,
	    "Incorrect total number of allocations");

	/* Setting to unlimited shouldn't alter the list of records. */
	future = -1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	c = 0;
	ql_foreach(n, &prof_recent_alloc_list, link) {
		++c;
		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
		confirm_record_released(n);
	}
	assert_u_eq(c, OPT_ALLOC_MAX - 1,
	    "Incorrect total number of allocations");

	/* Downshift to only one record. */
	future = 1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	assert_false(ql_empty(&prof_recent_alloc_list), "Recent list is empty");
	n = ql_first(&prof_recent_alloc_list);
	confirm_record_size(n, 4 * OPT_ALLOC_MAX - 1);
	confirm_record_released(n);
	n = ql_next(&prof_recent_alloc_list, n, link);
	assert_ptr_null(n, "Recent list should only contain one record");

	/* Completely turn off. */
	future = 0;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	assert_true(ql_empty(&prof_recent_alloc_list),
	    "Recent list should be empty");

	/* Restore the settings. */
	future = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	assert_true(ql_empty(&prof_recent_alloc_list),
	    "Recent list should be empty");

	confirm_prof_setup();
}
TEST_END
341
342#undef NTH_REQ_SIZE
343
/* Accumulation buffer for dump output captured by test_dump_write_cb. */
#define DUMP_OUT_SIZE 4096
static char dump_out[DUMP_OUT_SIZE];
static size_t dump_out_len = 0;

/*
 * Write callback handed to the dump mallctl: append str (including its
 * terminating NUL) to dump_out, tracking the logical length in
 * dump_out_len so repeated calls concatenate.
 */
static void
test_dump_write_cb(void *cbopaque, const char *str) {
	size_t n = strlen(str);
	assert(dump_out_len + n < DUMP_OUT_SIZE);
	/* Copy the terminator too, so dump_out is always a C string. */
	memcpy(&dump_out[dump_out_len], str, n + 1);
	dump_out_len += n;
}
355
356static void
357call_dump() {
358 static void *in[2] = {test_dump_write_cb, NULL};
359 dump_out_len = 0;
360 assert_d_eq(mallctl("experimental.prof_recent.alloc_dump",
361 NULL, NULL, in, sizeof(in)), 0, "Dump mallctl raised error");
362}
363
/* Expected fields of one dumped record, checked by confirm_record(). */
typedef struct {
	size_t size;	/* Requested allocation size. */
	size_t usize;	/* Usable size. */
	bool released;	/* Whether the allocation has been freed. */
} confirm_record_t;
369
#define DUMP_ERROR "Dump output is wrong"

/*
 * Structurally validates the JSON in dump_out: the non-record framing must
 * match template, and each entry of records[0..n_records) must appear in
 * order with matching size/usize/released fields. Numeric values such as
 * thread uids, timestamps and stack addresses are only scanned over, not
 * compared.
 */
static void
confirm_record(const char *template, const confirm_record_t *records,
    const size_t n_records) {
	static const char *types[2] = {"alloc", "dalloc"};
	static char buf[64];

	/*
	 * The template string would be in the form of:
	 * "{...,\"recent_alloc\":[]}",
	 * and dump_out would be in the form of:
	 * "{...,\"recent_alloc\":[...]}".
	 * Using "- 2" serves to cut right before the ending "]}".
	 */
	assert_d_eq(memcmp(dump_out, template, strlen(template) - 2), 0,
	    DUMP_ERROR);
	assert_d_eq(memcmp(dump_out + strlen(dump_out) - 2,
	    template + strlen(template) - 2, 2), 0, DUMP_ERROR);

	/* [start, end) brackets the record array's contents. */
	const char *start = dump_out + strlen(template) - 2;
	const char *end = dump_out + strlen(dump_out) - 2;
	const confirm_record_t *record;
	for (record = records; record < records + n_records; ++record) {

/* Consume exactly one expected character. */
#define ASSERT_CHAR(c) do {						\
	assert_true(start < end, DUMP_ERROR);				\
	assert_c_eq(*start++, c, DUMP_ERROR);				\
} while (0)

/* Consume an exact expected string. */
#define ASSERT_STR(s) do {						\
	const size_t len = strlen(s);					\
	assert_true(start + len <= end, DUMP_ERROR);			\
	assert_d_eq(memcmp(start, s, len), 0, DUMP_ERROR);		\
	start += len;							\
} while (0)

#define ASSERT_FORMATTED_STR(s, ...) do {				\
	malloc_snprintf(buf, sizeof(buf), s, __VA_ARGS__);		\
	ASSERT_STR(buf);						\
} while (0)

		if (record != records) {
			ASSERT_CHAR(',');
		}

		ASSERT_CHAR('{');

		ASSERT_STR("\"size\"");
		ASSERT_CHAR(':');
		ASSERT_FORMATTED_STR("%zu", record->size);
		ASSERT_CHAR(',');

		ASSERT_STR("\"usize\"");
		ASSERT_CHAR(':');
		ASSERT_FORMATTED_STR("%zu", record->usize);
		ASSERT_CHAR(',');

		ASSERT_STR("\"released\"");
		ASSERT_CHAR(':');
		ASSERT_STR(record->released ? "true" : "false");
		ASSERT_CHAR(',');

		/* First pass checks "alloc_*", second pass "dalloc_*". */
		const char **type = types;
		while (true) {
			ASSERT_FORMATTED_STR("\"%s_thread_uid\"", *type);
			ASSERT_CHAR(':');
			/*
			 * NOTE(review): isdigit() on plain char is UB for
			 * negative values; fine in practice here since the
			 * scanned text is ASCII.
			 */
			while (isdigit(*start)) {
				++start;
			}
			ASSERT_CHAR(',');

			if (opt_prof_sys_thread_name) {
				ASSERT_FORMATTED_STR("\"%s_thread_name\"",
				    *type);
				ASSERT_CHAR(':');
				ASSERT_CHAR('"');
				while (*start != '"') {
					++start;
				}
				ASSERT_CHAR('"');
				ASSERT_CHAR(',');
			}

			ASSERT_FORMATTED_STR("\"%s_time\"", *type);
			ASSERT_CHAR(':');
			while (isdigit(*start)) {
				++start;
			}
			ASSERT_CHAR(',');

			ASSERT_FORMATTED_STR("\"%s_trace\"", *type);
			ASSERT_CHAR(':');
			ASSERT_CHAR('[');
			/* Skip hex addresses, quotes and separators. */
			while (isdigit(*start) || *start == 'x' ||
			    (*start >= 'a' && *start <= 'f') ||
			    *start == '\"' || *start == ',') {
				++start;
			}
			ASSERT_CHAR(']');

			if (strcmp(*type, "dalloc") == 0) {
				break;
			}

			/* Only released records carry a dalloc section. */
			assert(strcmp(*type, "alloc") == 0);
			if (!record->released) {
				break;
			}

			ASSERT_CHAR(',');
			++type;
		}

		ASSERT_CHAR('}');

#undef ASSERT_FORMATTED_STR
#undef ASSERT_STR
#undef ASSERT_CHAR

	}
	assert_ptr_eq(record, records + n_records, DUMP_ERROR);
	assert_ptr_eq(start, end, DUMP_ERROR);
}
494
/*
 * End-to-end check of the JSON dump: grows the record limit from 0 to 2,
 * performs two sampled allocations (one aligned), frees them, and after
 * every step validates the dump via confirm_record().
 */
TEST_BEGIN(test_prof_recent_alloc_dump) {
	test_skip_if(!config_prof);

	confirm_prof_setup();

	ssize_t future;
	void *p, *q;
	confirm_record_t records[2];

	/* Every allocation is sampled, so each one must be recorded. */
	assert_zu_eq(lg_prof_sample, (size_t)0,
	    "lg_prof_sample not set correctly");

	future = 0;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	call_dump();
	expect_str_eq(dump_out, "{\"sample_interval\":1,"
	    "\"recent_alloc_max\":0,\"recent_alloc\":[]}", DUMP_ERROR);

	future = 2;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	call_dump();
	const char *template = "{\"sample_interval\":1,"
	    "\"recent_alloc_max\":2,\"recent_alloc\":[]}";
	expect_str_eq(dump_out, template, DUMP_ERROR);

	p = malloc(7);
	call_dump();
	records[0].size = 7;
	records[0].usize = sz_s2u(7);
	records[0].released = false;
	confirm_record(template, records, 1);

	q = mallocx(17, MALLOCX_ALIGN(128));
	call_dump();
	records[1].size = 17;
	records[1].usize = sz_sa2u(17, 128);
	records[1].released = false;
	confirm_record(template, records, 2);

	free(q);
	call_dump();
	records[1].released = true;
	confirm_record(template, records, 2);

	free(p);
	call_dump();
	records[0].released = true;
	confirm_record(template, records, 2);

	/* Restore the limit expected by the other tests. */
	future = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	confirm_prof_setup();
}
TEST_END
552
553#undef DUMP_ERROR
554#undef DUMP_OUT_SIZE
555
/* Stress-test parameters. */
#define N_THREADS 8
#define N_PTRS 512
#define N_CTLS 8
#define N_ITERS 2048
#define STRESS_ALLOC_MAX 4096

/* Per-thread state for the stress test. */
typedef struct {
	thd_t thd;		/* Thread handle. */
	size_t id;		/* Index into thd_data; seeds the PRNG. */
	void *ptrs[N_PTRS];	/* Currently-live allocations. */
	size_t count;		/* Number of live entries in ptrs. */
} thd_data_t;

static thd_data_t thd_data[N_THREADS];
/* Baseline last-N limit the workers write around (shared, test-set). */
static ssize_t test_max;
571
/*
 * Dump callback for the stress test: discards the output, but sleeps 1ms
 * per write so dumping stays in progress long enough to contend with the
 * allocating/ctl-writing threads.
 */
static void
test_write_cb(void *cbopaque, const char *str) {
	sleep_ns(1000 * 1000);
}
576
/*
 * Stress-test worker: each iteration picks a pseudo-random action — free
 * a random live pointer, allocate a new one, dump the recent list, or
 * read/write the last-N limit — racing allocation bookkeeping against
 * limit changes and dumps in the other threads.
 */
static void *
f_thread(void *arg) {
	const size_t thd_id = *(size_t *)arg;
	thd_data_t *data_p = thd_data + thd_id;
	assert(data_p->id == thd_id);
	data_p->count = 0;
	uint64_t rand = (uint64_t)thd_id;
	tsd_t *tsd = tsd_fetch();
	assert(test_max > 1);
	ssize_t last_max = -1;
	for (int i = 0; i < N_ITERS; i++) {
		rand = prng_range_u64(&rand, N_PTRS + N_CTLS * 5);
		assert(data_p->count <= N_PTRS);
		if (rand < data_p->count) {
			/* Free a random live pointer (swap-with-last). */
			assert(data_p->count > 0);
			if (rand != data_p->count - 1) {
				assert(data_p->count > 1);
				void *temp = data_p->ptrs[rand];
				data_p->ptrs[rand] =
				    data_p->ptrs[data_p->count - 1];
				data_p->ptrs[data_p->count - 1] = temp;
			}
			free(data_p->ptrs[--data_p->count]);
		} else if (rand < N_PTRS) {
			/* Allocate; likelihood grows with free slots. */
			assert(data_p->count < N_PTRS);
			data_p->ptrs[data_p->count++] = malloc(1);
		} else if (rand % 5 == 0) {
			prof_recent_alloc_dump(tsd, test_write_cb, NULL);
		} else if (rand % 5 == 1) {
			last_max = prof_recent_alloc_max_ctl_read();
		} else if (rand % 5 == 2) {
			last_max =
			    prof_recent_alloc_max_ctl_write(tsd, test_max * 2);
		} else if (rand % 5 == 3) {
			last_max =
			    prof_recent_alloc_max_ctl_write(tsd, test_max);
		} else {
			assert(rand % 5 == 4);
			last_max =
			    prof_recent_alloc_max_ctl_write(tsd, test_max / 2);
		}
		/* ctl read/write both report the previous limit. */
		assert_zd_ge(last_max, -1, "Illegal last-N max");
	}

	/* Release anything still live before exiting. */
	while (data_p->count > 0) {
		free(data_p->ptrs[--data_p->count]);
	}

	return NULL;
}
627
/*
 * Runs the f_thread workload twice: once around the small OPT_ALLOC_MAX
 * limit and once around the much larger STRESS_ALLOC_MAX, then restores
 * the original limit and invariants.
 */
TEST_BEGIN(test_prof_recent_stress) {
	test_skip_if(!config_prof);

	confirm_prof_setup();

	test_max = OPT_ALLOC_MAX;
	for (size_t i = 0; i < N_THREADS; i++) {
		thd_data_t *data_p = thd_data + i;
		data_p->id = i;
		thd_create(&data_p->thd, &f_thread, &data_p->id);
	}
	for (size_t i = 0; i < N_THREADS; i++) {
		thd_data_t *data_p = thd_data + i;
		thd_join(data_p->thd, NULL);
	}

	/* Second round with a much larger last-N limit. */
	test_max = STRESS_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error");
	for (size_t i = 0; i < N_THREADS; i++) {
		thd_data_t *data_p = thd_data + i;
		data_p->id = i;
		thd_create(&data_p->thd, &f_thread, &data_p->id);
	}
	for (size_t i = 0; i < N_THREADS; i++) {
		thd_data_t *data_p = thd_data + i;
		thd_join(data_p->thd, NULL);
	}

	/* Restore the configured limit. */
	test_max = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error");
	confirm_prof_setup();
}
TEST_END
663
664#undef STRESS_ALLOC_MAX
665#undef N_ITERS
666#undef N_PTRS
667#undef N_THREADS
668
669int
670main(void) {
671 return test(
672 test_confirm_setup,
673 test_prof_recent_off,
674 test_prof_recent_on,
675 test_prof_recent_alloc,
676 test_prof_recent_alloc_dump,
677 test_prof_recent_stress);
678}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_recent.sh b/examples/redis-unstable/deps/jemalloc/test/unit/prof_recent.sh
deleted file mode 100644
index 58a54a4..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_recent.sh
+++ /dev/null
@@ -1,5 +0,0 @@
#!/bin/sh

# Configure profiling for the prof_recent tests: sample every allocation
# (lg_prof_sample:0) and keep the 3 most recent allocation records — this
# must match OPT_ALLOC_MAX in prof_recent.c. Applies only when jemalloc
# was built with --enable-prof.
if [ "x${enable_prof}" = "x1" ] ; then
  export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0,prof_recent_alloc_max:3"
fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_reset.c b/examples/redis-unstable/deps/jemalloc/test/unit/prof_reset.c
deleted file mode 100644
index 9b33b20..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_reset.c
+++ /dev/null
@@ -1,266 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/prof_data.h"
4#include "jemalloc/internal/prof_sys.h"
5
/*
 * Replacement for prof_dump_open_file: discard all dump output by
 * directing it to /dev/null regardless of the requested filename/mode.
 */
static int
prof_dump_open_file_intercept(const char *filename, int mode) {
	int fd = open("/dev/null", O_WRONLY);
	assert_d_ne(fd, -1, "Unexpected open() failure");
	return fd;
}
15
16static void
17set_prof_active(bool active) {
18 expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
19 sizeof(active)), 0, "Unexpected mallctl failure");
20}
21
22static size_t
23get_lg_prof_sample(void) {
24 size_t ret;
25 size_t sz = sizeof(size_t);
26
27 expect_d_eq(mallctl("prof.lg_sample", (void *)&ret, &sz, NULL, 0), 0,
28 "Unexpected mallctl failure while reading profiling sample rate");
29 return ret;
30}
31
32static void
33do_prof_reset(size_t lg_prof_sample_input) {
34 expect_d_eq(mallctl("prof.reset", NULL, NULL,
35 (void *)&lg_prof_sample_input, sizeof(size_t)), 0,
36 "Unexpected mallctl failure while resetting profile data");
37 expect_zu_eq(lg_prof_sample_input, get_lg_prof_sample(),
38 "Expected profile sample rate change");
39}
40
/*
 * Verifies that "prof.reset" leaves the sample rate alone when no new
 * rate is supplied and changes it when one is, and that
 * "opt.lg_prof_sample" and "prof.lg_sample" agree before and after.
 */
TEST_BEGIN(test_prof_reset_basic) {
	size_t lg_prof_sample_orig, lg_prof_sample_cur, lg_prof_sample_next;
	size_t sz;
	unsigned i;

	test_skip_if(!config_prof);

	sz = sizeof(size_t);
	expect_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
	    &sz, NULL, 0), 0,
	    "Unexpected mallctl failure while reading profiling sample rate");
	/* prof_reset.sh sets lg_prof_sample:0. */
	expect_zu_eq(lg_prof_sample_orig, 0,
	    "Unexpected profiling sample rate");
	lg_prof_sample_cur = get_lg_prof_sample();
	expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
	    "\"prof.lg_sample\"");

	/* Test simple resets. */
	for (i = 0; i < 2; i++) {
		expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
		    "Unexpected mallctl failure while resetting profile data");
		lg_prof_sample_cur = get_lg_prof_sample();
		expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
		    "Unexpected profile sample rate change");
	}

	/* Test resets with prof.lg_sample changes. */
	lg_prof_sample_next = 1;
	for (i = 0; i < 2; i++) {
		do_prof_reset(lg_prof_sample_next);
		lg_prof_sample_cur = get_lg_prof_sample();
		expect_zu_eq(lg_prof_sample_cur, lg_prof_sample_next,
		    "Expected profile sample rate change");
		lg_prof_sample_next = lg_prof_sample_orig;
	}

	/* Make sure the test code restored prof.lg_sample. */
	lg_prof_sample_cur = get_lg_prof_sample();
	expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
	    "\"prof.lg_sample\"");
}
TEST_END
85
/*
 * Checks that "prof.reset" zeroes the live-object counters but keeps a
 * backtrace structure alive while its allocation still exists; freeing
 * the allocation then drops the backtrace count back to zero.
 */
TEST_BEGIN(test_prof_reset_cleanup) {
	test_skip_if(!config_prof);

	set_prof_active(true);

	expect_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
	void *p = mallocx(1, 0);
	expect_ptr_not_null(p, "Unexpected mallocx() failure");
	expect_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");

	prof_cnt_t cnt_all;
	prof_cnt_all(&cnt_all);
	expect_u64_eq(cnt_all.curobjs, 1, "Expected 1 allocation");

	expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
	    "Unexpected error while resetting heap profile data");
	prof_cnt_all(&cnt_all);
	expect_u64_eq(cnt_all.curobjs, 0, "Expected 0 allocations");
	/* The backtrace survives the reset while p is still live. */
	expect_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");

	dallocx(p, 0);
	expect_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");

	set_prof_active(false);
}
TEST_END
112
/* Workload parameters for test_prof_reset. */
#define NTHREADS 4
#define NALLOCS_PER_THREAD (1U << 13)
#define OBJ_RING_BUF_COUNT 1531
#define RESET_INTERVAL (1U << 10)
#define DUMP_INTERVAL 3677
/*
 * Worker for test_prof_reset: cycles allocations through a ring buffer
 * while periodically resetting and dumping the heap profile, racing
 * profile bookkeeping against reset/dump in the other threads.
 */
static void *
thd_start(void *varg) {
	unsigned thd_ind = *(unsigned *)varg;
	unsigned i;
	void *objs[OBJ_RING_BUF_COUNT];

	memset(objs, 0, sizeof(objs));

	for (i = 0; i < NALLOCS_PER_THREAD; i++) {
		if (i % RESET_INTERVAL == 0) {
			expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
			    0, "Unexpected error while resetting heap profile "
			    "data");
		}

		if (i % DUMP_INTERVAL == 0) {
			expect_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
			    0, "Unexpected error while dumping heap profile");
		}

		{
			/* Replace the oldest ring-buffer slot. */
			void **pp = &objs[i % OBJ_RING_BUF_COUNT];
			if (*pp != NULL) {
				dallocx(*pp, 0);
				*pp = NULL;
			}
			/*
			 * NOTE(review): the second btalloc() argument looks
			 * like it diversifies backtraces per (thread, iter);
			 * confirm against the test harness.
			 */
			*pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i);
			expect_ptr_not_null(*pp,
			    "Unexpected btalloc() failure");
		}
	}

	/* Clean up any remaining objects. */
	for (i = 0; i < OBJ_RING_BUF_COUNT; i++) {
		void **pp = &objs[i % OBJ_RING_BUF_COUNT];
		if (*pp != NULL) {
			dallocx(*pp, 0);
			*pp = NULL;
		}
	}

	return NULL;
}
161
162TEST_BEGIN(test_prof_reset) {
163 size_t lg_prof_sample_orig;
164 thd_t thds[NTHREADS];
165 unsigned thd_args[NTHREADS];
166 unsigned i;
167 size_t bt_count, tdata_count;
168
169 test_skip_if(!config_prof);
170
171 bt_count = prof_bt_count();
172 expect_zu_eq(bt_count, 0,
173 "Unexpected pre-existing tdata structures");
174 tdata_count = prof_tdata_count();
175
176 lg_prof_sample_orig = get_lg_prof_sample();
177 do_prof_reset(5);
178
179 set_prof_active(true);
180
181 for (i = 0; i < NTHREADS; i++) {
182 thd_args[i] = i;
183 thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
184 }
185 for (i = 0; i < NTHREADS; i++) {
186 thd_join(thds[i], NULL);
187 }
188
189 expect_zu_eq(prof_bt_count(), bt_count,
190 "Unexpected bactrace count change");
191 expect_zu_eq(prof_tdata_count(), tdata_count,
192 "Unexpected remaining tdata structures");
193
194 set_prof_active(false);
195
196 do_prof_reset(lg_prof_sample_orig);
197}
198TEST_END
199#undef NTHREADS
200#undef NALLOCS_PER_THREAD
201#undef OBJ_RING_BUF_COUNT
202#undef RESET_INTERVAL
203#undef DUMP_INTERVAL
204
/* Test sampling at the same allocation site across resets. */
#define NITER 10
/*
 * Interleaves profile resets with small allocations and in-place resize
 * attempts: xallocx() at the current size must succeed, and growing to
 * the next size class must fail (return the old size).
 */
TEST_BEGIN(test_xallocx) {
	size_t lg_prof_sample_orig;
	unsigned i;
	void *ptrs[NITER];

	test_skip_if(!config_prof);

	lg_prof_sample_orig = get_lg_prof_sample();
	set_prof_active(true);

	/* Reset profiling. */
	do_prof_reset(0);

	for (i = 0; i < NITER; i++) {
		void *p;
		size_t sz, nsz;

		/* Reset profiling. */
		do_prof_reset(0);

		/* Allocate small object (which will be promoted). */
		p = ptrs[i] = mallocx(1, 0);
		expect_ptr_not_null(p, "Unexpected mallocx() failure");

		/* Reset profiling. */
		do_prof_reset(0);

		/* Perform successful xallocx(). */
		sz = sallocx(p, 0);
		expect_zu_eq(xallocx(p, sz, 0, 0), sz,
		    "Unexpected xallocx() failure");

		/* Perform unsuccessful xallocx(). */
		nsz = nallocx(sz+1, 0);
		expect_zu_eq(xallocx(p, nsz, 0, 0), sz,
		    "Unexpected xallocx() success");
	}

	for (i = 0; i < NITER; i++) {
		/* dallocx. */
		dallocx(ptrs[i], 0);
	}

	set_prof_active(false);
	do_prof_reset(lg_prof_sample_orig);
}
TEST_END
#undef NITER
255
256int
257main(void) {
258 /* Intercept dumping prior to running any tests. */
259 prof_dump_open_file = prof_dump_open_file_intercept;
260
261 return test_no_reentrancy(
262 test_prof_reset_basic,
263 test_prof_reset_cleanup,
264 test_prof_reset,
265 test_xallocx);
266}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_reset.sh b/examples/redis-unstable/deps/jemalloc/test/unit/prof_reset.sh
deleted file mode 100644
index daefeb7..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_reset.sh
+++ /dev/null
@@ -1,5 +0,0 @@
#!/bin/sh

# Profiling on but initially inactive, sampling every allocation and with
# recent-allocation recording disabled; the tests toggle prof.active and
# the sample rate themselves. Applies only with --enable-prof builds.
if [ "x${enable_prof}" = "x1" ] ; then
  export MALLOC_CONF="prof:true,prof_active:false,lg_prof_sample:0,prof_recent_alloc_max:0"
fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_stats.c b/examples/redis-unstable/deps/jemalloc/test/unit/prof_stats.c
deleted file mode 100644
index c88c4ae..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_stats.c
+++ /dev/null
@@ -1,151 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#define N_PTRS 3
4
/*
 * For size class ind, allocates N_PTRS objects with the given request
 * sizes/flags and verifies after every mallocx()/sdallocx() that the
 * prof.stats.{bins,lextents}.<i>.{live,accum} counters move by exactly
 * the requested bytes and object counts, relative to their starting
 * values (live shrinks on free; accum never does).
 */
static void
test_combinations(szind_t ind, size_t sizes_array[N_PTRS],
    int flags_array[N_PTRS]) {
#define MALLCTL_STR_LEN 64
	assert(opt_prof && opt_prof_stats);

	/* Build the mallctl names for this size class. */
	char mallctl_live_str[MALLCTL_STR_LEN];
	char mallctl_accum_str[MALLCTL_STR_LEN];
	if (ind < SC_NBINS) {
		malloc_snprintf(mallctl_live_str, MALLCTL_STR_LEN,
		    "prof.stats.bins.%u.live", (unsigned)ind);
		malloc_snprintf(mallctl_accum_str, MALLCTL_STR_LEN,
		    "prof.stats.bins.%u.accum", (unsigned)ind);
	} else {
		malloc_snprintf(mallctl_live_str, MALLCTL_STR_LEN,
		    "prof.stats.lextents.%u.live", (unsigned)(ind - SC_NBINS));
		malloc_snprintf(mallctl_accum_str, MALLCTL_STR_LEN,
		    "prof.stats.lextents.%u.accum", (unsigned)(ind - SC_NBINS));
	}

	/* Each stat is a pair: {requested bytes, object count}. */
	size_t stats_len = 2 * sizeof(uint64_t);

	uint64_t live_stats_orig[2];
	assert_d_eq(mallctl(mallctl_live_str, &live_stats_orig, &stats_len,
	    NULL, 0), 0, "");
	uint64_t accum_stats_orig[2];
	assert_d_eq(mallctl(mallctl_accum_str, &accum_stats_orig, &stats_len,
	    NULL, 0), 0, "");

	void *ptrs[N_PTRS];

	uint64_t live_req_sum = 0;
	uint64_t live_count = 0;
	uint64_t accum_req_sum = 0;
	uint64_t accum_count = 0;

	/* Allocation phase: live and accum stats both grow. */
	for (size_t i = 0; i < N_PTRS; ++i) {
		size_t sz = sizes_array[i];
		int flags = flags_array[i];
		void *p = mallocx(sz, flags);
		assert_ptr_not_null(p, "malloc() failed");
		assert(TEST_MALLOC_SIZE(p) == sz_index2size(ind));
		ptrs[i] = p;
		live_req_sum += sz;
		live_count++;
		accum_req_sum += sz;
		accum_count++;
		uint64_t live_stats[2];
		assert_d_eq(mallctl(mallctl_live_str, &live_stats, &stats_len,
		    NULL, 0), 0, "");
		expect_u64_eq(live_stats[0] - live_stats_orig[0],
		    live_req_sum, "");
		expect_u64_eq(live_stats[1] - live_stats_orig[1],
		    live_count, "");
		uint64_t accum_stats[2];
		assert_d_eq(mallctl(mallctl_accum_str, &accum_stats, &stats_len,
		    NULL, 0), 0, "");
		expect_u64_eq(accum_stats[0] - accum_stats_orig[0],
		    accum_req_sum, "");
		expect_u64_eq(accum_stats[1] - accum_stats_orig[1],
		    accum_count, "");
	}

	/* Deallocation phase: live stats shrink, accum stats stay. */
	for (size_t i = 0; i < N_PTRS; ++i) {
		size_t sz = sizes_array[i];
		int flags = flags_array[i];
		sdallocx(ptrs[i], sz, flags);
		live_req_sum -= sz;
		live_count--;
		uint64_t live_stats[2];
		assert_d_eq(mallctl(mallctl_live_str, &live_stats, &stats_len,
		    NULL, 0), 0, "");
		expect_u64_eq(live_stats[0] - live_stats_orig[0],
		    live_req_sum, "");
		expect_u64_eq(live_stats[1] - live_stats_orig[1],
		    live_count, "");
		uint64_t accum_stats[2];
		assert_d_eq(mallctl(mallctl_accum_str, &accum_stats, &stats_len,
		    NULL, 0), 0, "");
		expect_u64_eq(accum_stats[0] - accum_stats_orig[0],
		    accum_req_sum, "");
		expect_u64_eq(accum_stats[1] - accum_stats_orig[1],
		    accum_count, "");
	}
#undef MALLCTL_STR_LEN
}
91
92static void
93test_szind_wrapper(szind_t ind) {
94 size_t sizes_array[N_PTRS];
95 int flags_array[N_PTRS];
96 for (size_t i = 0, sz = sz_index2size(ind) - N_PTRS; i < N_PTRS;
97 ++i, ++sz) {
98 sizes_array[i] = sz;
99 flags_array[i] = 0;
100 }
101 test_combinations(ind, sizes_array, flags_array);
102}
103
104TEST_BEGIN(test_prof_stats) {
105 test_skip_if(!config_prof);
106 test_szind_wrapper(0);
107 test_szind_wrapper(1);
108 test_szind_wrapper(2);
109 test_szind_wrapper(SC_NBINS);
110 test_szind_wrapper(SC_NBINS + 1);
111 test_szind_wrapper(SC_NBINS + 2);
112}
113TEST_END
114
115static void
116test_szind_aligned_wrapper(szind_t ind, unsigned lg_align) {
117 size_t sizes_array[N_PTRS];
118 int flags_array[N_PTRS];
119 int flags = MALLOCX_LG_ALIGN(lg_align);
120 for (size_t i = 0, sz = sz_index2size(ind) - N_PTRS; i < N_PTRS;
121 ++i, ++sz) {
122 sizes_array[i] = sz;
123 flags_array[i] = flags;
124 }
125 test_combinations(
126 sz_size2index(sz_sa2u(sz_index2size(ind), 1 << lg_align)),
127 sizes_array, flags_array);
128}
129
/*
 * Aligned variants: sweeps small size classes against small alignments,
 * then classes around the bin/lextent boundary against alignments around
 * SC_LG_LARGE_MINCLASS.
 */
TEST_BEGIN(test_prof_stats_aligned) {
	test_skip_if(!config_prof);
	for (szind_t ind = 0; ind < 10; ++ind) {
		for (unsigned lg_align = 0; lg_align < 10; ++lg_align) {
			test_szind_aligned_wrapper(ind, lg_align);
		}
	}
	for (szind_t ind = SC_NBINS - 5; ind < SC_NBINS + 5; ++ind) {
		for (unsigned lg_align = SC_LG_LARGE_MINCLASS - 5;
		    lg_align < SC_LG_LARGE_MINCLASS + 5; ++lg_align) {
			test_szind_aligned_wrapper(ind, lg_align);
		}
	}
}
TEST_END
145
146int
147main(void) {
148 return test(
149 test_prof_stats,
150 test_prof_stats_aligned);
151}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_stats.sh b/examples/redis-unstable/deps/jemalloc/test/unit/prof_stats.sh
deleted file mode 100644
index f3c819b..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_stats.sh
+++ /dev/null
@@ -1,5 +0,0 @@
1#!/bin/sh
2
3if [ "x${enable_prof}" = "x1" ] ; then
4 export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0,prof_stats:true"
5fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_sys_thread_name.c b/examples/redis-unstable/deps/jemalloc/test/unit/prof_sys_thread_name.c
deleted file mode 100644
index affc788..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_sys_thread_name.c
+++ /dev/null
@@ -1,77 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/prof_sys.h"
4
5static const char *test_thread_name = "test_name";
6
7static int
8test_prof_sys_thread_name_read_error(char *buf, size_t limit) {
9 return ENOSYS;
10}
11
12static int
13test_prof_sys_thread_name_read(char *buf, size_t limit) {
14 assert(strlen(test_thread_name) < limit);
15 strncpy(buf, test_thread_name, limit);
16 return 0;
17}
18
19static int
20test_prof_sys_thread_name_read_clear(char *buf, size_t limit) {
21 assert(limit > 0);
22 buf[0] = '\0';
23 return 0;
24}
25
26TEST_BEGIN(test_prof_sys_thread_name) {
27 test_skip_if(!config_prof);
28
29 bool oldval;
30 size_t sz = sizeof(oldval);
31 assert_d_eq(mallctl("opt.prof_sys_thread_name", &oldval, &sz, NULL, 0),
32 0, "mallctl failed");
33 assert_true(oldval, "option was not set correctly");
34
35 const char *thread_name;
36 sz = sizeof(thread_name);
37 assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
38 "mallctl read for thread name should not fail");
39 expect_str_eq(thread_name, "", "Initial thread name should be empty");
40
41 thread_name = test_thread_name;
42 assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, sz),
43 ENOENT, "mallctl write for thread name should fail");
44 assert_ptr_eq(thread_name, test_thread_name,
45 "Thread name should not be touched");
46
47 prof_sys_thread_name_read = test_prof_sys_thread_name_read_error;
48 void *p = malloc(1);
49 free(p);
50 assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
51 "mallctl read for thread name should not fail");
52 assert_str_eq(thread_name, "",
53 "Thread name should stay the same if the system call fails");
54
55 prof_sys_thread_name_read = test_prof_sys_thread_name_read;
56 p = malloc(1);
57 free(p);
58 assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
59 "mallctl read for thread name should not fail");
60 assert_str_eq(thread_name, test_thread_name,
61 "Thread name should be changed if the system call succeeds");
62
63 prof_sys_thread_name_read = test_prof_sys_thread_name_read_clear;
64 p = malloc(1);
65 free(p);
66 assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
67 "mallctl read for thread name should not fail");
68 expect_str_eq(thread_name, "", "Thread name should be updated if the "
69 "system call returns a different name");
70}
71TEST_END
72
73int
74main(void) {
75 return test(
76 test_prof_sys_thread_name);
77}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_sys_thread_name.sh b/examples/redis-unstable/deps/jemalloc/test/unit/prof_sys_thread_name.sh
deleted file mode 100644
index 1f02a8a..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_sys_thread_name.sh
+++ /dev/null
@@ -1,5 +0,0 @@
1#!/bin/sh
2
3if [ "x${enable_prof}" = "x1" ] ; then
4 export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0,prof_sys_thread_name:true"
5fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_tctx.c b/examples/redis-unstable/deps/jemalloc/test/unit/prof_tctx.c
deleted file mode 100644
index e0efdc3..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_tctx.c
+++ /dev/null
@@ -1,48 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/prof_data.h"
4
5TEST_BEGIN(test_prof_realloc) {
6 tsd_t *tsd;
7 int flags;
8 void *p, *q;
9 prof_info_t prof_info_p, prof_info_q;
10 prof_cnt_t cnt_0, cnt_1, cnt_2, cnt_3;
11
12 test_skip_if(!config_prof);
13
14 tsd = tsd_fetch();
15 flags = MALLOCX_TCACHE_NONE;
16
17 prof_cnt_all(&cnt_0);
18 p = mallocx(1024, flags);
19 expect_ptr_not_null(p, "Unexpected mallocx() failure");
20 prof_info_get(tsd, p, NULL, &prof_info_p);
21 expect_ptr_ne(prof_info_p.alloc_tctx, (prof_tctx_t *)(uintptr_t)1U,
22 "Expected valid tctx");
23 prof_cnt_all(&cnt_1);
24 expect_u64_eq(cnt_0.curobjs + 1, cnt_1.curobjs,
25 "Allocation should have increased sample size");
26
27 q = rallocx(p, 2048, flags);
28 expect_ptr_ne(p, q, "Expected move");
29 expect_ptr_not_null(p, "Unexpected rmallocx() failure");
30 prof_info_get(tsd, q, NULL, &prof_info_q);
31 expect_ptr_ne(prof_info_q.alloc_tctx, (prof_tctx_t *)(uintptr_t)1U,
32 "Expected valid tctx");
33 prof_cnt_all(&cnt_2);
34 expect_u64_eq(cnt_1.curobjs, cnt_2.curobjs,
35 "Reallocation should not have changed sample size");
36
37 dallocx(q, flags);
38 prof_cnt_all(&cnt_3);
39 expect_u64_eq(cnt_0.curobjs, cnt_3.curobjs,
40 "Sample size should have returned to base level");
41}
42TEST_END
43
44int
45main(void) {
46 return test_no_reentrancy(
47 test_prof_realloc);
48}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_tctx.sh b/examples/redis-unstable/deps/jemalloc/test/unit/prof_tctx.sh
deleted file mode 100644
index 485f9bf..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_tctx.sh
+++ /dev/null
@@ -1,5 +0,0 @@
1#!/bin/sh
2
3if [ "x${enable_prof}" = "x1" ] ; then
4 export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
5fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_thread_name.c b/examples/redis-unstable/deps/jemalloc/test/unit/prof_thread_name.c
deleted file mode 100644
index 3c4614f..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_thread_name.c
+++ /dev/null
@@ -1,122 +0,0 @@
1#include "test/jemalloc_test.h"
2
3static void
4mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
5 int line) {
6 const char *thread_name_old;
7 size_t sz;
8
9 sz = sizeof(thread_name_old);
10 expect_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz,
11 NULL, 0), 0,
12 "%s():%d: Unexpected mallctl failure reading thread.prof.name",
13 func, line);
14 expect_str_eq(thread_name_old, thread_name_expected,
15 "%s():%d: Unexpected thread.prof.name value", func, line);
16}
17#define mallctl_thread_name_get(a) \
18 mallctl_thread_name_get_impl(a, __func__, __LINE__)
19
20static void
21mallctl_thread_name_set_impl(const char *thread_name, const char *func,
22 int line) {
23 expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
24 (void *)&thread_name, sizeof(thread_name)), 0,
25 "%s():%d: Unexpected mallctl failure writing thread.prof.name",
26 func, line);
27 mallctl_thread_name_get_impl(thread_name, func, line);
28}
29#define mallctl_thread_name_set(a) \
30 mallctl_thread_name_set_impl(a, __func__, __LINE__)
31
32TEST_BEGIN(test_prof_thread_name_validation) {
33 const char *thread_name;
34
35 test_skip_if(!config_prof);
36 test_skip_if(opt_prof_sys_thread_name);
37
38 mallctl_thread_name_get("");
39 mallctl_thread_name_set("hi there");
40
41 /* NULL input shouldn't be allowed. */
42 thread_name = NULL;
43 expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
44 (void *)&thread_name, sizeof(thread_name)), EFAULT,
45 "Unexpected mallctl result writing \"%s\" to thread.prof.name",
46 thread_name);
47
48 /* '\n' shouldn't be allowed. */
49 thread_name = "hi\nthere";
50 expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
51 (void *)&thread_name, sizeof(thread_name)), EFAULT,
52 "Unexpected mallctl result writing \"%s\" to thread.prof.name",
53 thread_name);
54
55 /* Simultaneous read/write shouldn't be allowed. */
56 {
57 const char *thread_name_old;
58 size_t sz;
59
60 sz = sizeof(thread_name_old);
61 expect_d_eq(mallctl("thread.prof.name",
62 (void *)&thread_name_old, &sz, (void *)&thread_name,
63 sizeof(thread_name)), EPERM,
64 "Unexpected mallctl result writing \"%s\" to "
65 "thread.prof.name", thread_name);
66 }
67
68 mallctl_thread_name_set("");
69}
70TEST_END
71
72#define NTHREADS 4
73#define NRESET 25
74static void *
75thd_start(void *varg) {
76 unsigned thd_ind = *(unsigned *)varg;
77 char thread_name[16] = "";
78 unsigned i;
79
80 malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind);
81
82 mallctl_thread_name_get("");
83 mallctl_thread_name_set(thread_name);
84
85 for (i = 0; i < NRESET; i++) {
86 expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
87 "Unexpected error while resetting heap profile data");
88 mallctl_thread_name_get(thread_name);
89 }
90
91 mallctl_thread_name_set(thread_name);
92 mallctl_thread_name_set("");
93
94 return NULL;
95}
96
97TEST_BEGIN(test_prof_thread_name_threaded) {
98 test_skip_if(!config_prof);
99 test_skip_if(opt_prof_sys_thread_name);
100
101 thd_t thds[NTHREADS];
102 unsigned thd_args[NTHREADS];
103 unsigned i;
104
105 for (i = 0; i < NTHREADS; i++) {
106 thd_args[i] = i;
107 thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
108 }
109 for (i = 0; i < NTHREADS; i++) {
110 thd_join(thds[i], NULL);
111 }
112}
113TEST_END
114#undef NTHREADS
115#undef NRESET
116
117int
118main(void) {
119 return test(
120 test_prof_thread_name_validation,
121 test_prof_thread_name_threaded);
122}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/prof_thread_name.sh b/examples/redis-unstable/deps/jemalloc/test/unit/prof_thread_name.sh
deleted file mode 100644
index 298c105..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/prof_thread_name.sh
+++ /dev/null
@@ -1,5 +0,0 @@
1#!/bin/sh
2
3if [ "x${enable_prof}" = "x1" ] ; then
4 export MALLOC_CONF="prof:true,prof_active:false"
5fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/psset.c b/examples/redis-unstable/deps/jemalloc/test/unit/psset.c
deleted file mode 100644
index 6ff7201..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/psset.c
+++ /dev/null
@@ -1,748 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/psset.h"
4
5#define PAGESLAB_ADDR ((void *)(1234 * HUGEPAGE))
6#define PAGESLAB_AGE 5678
7
8#define ALLOC_ARENA_IND 111
9#define ALLOC_ESN 222
10
11static void
12edata_init_test(edata_t *edata) {
13 memset(edata, 0, sizeof(*edata));
14 edata_arena_ind_set(edata, ALLOC_ARENA_IND);
15 edata_esn_set(edata, ALLOC_ESN);
16}
17
18static void
19test_psset_fake_purge(hpdata_t *ps) {
20 hpdata_purge_state_t purge_state;
21 hpdata_alloc_allowed_set(ps, false);
22 hpdata_purge_begin(ps, &purge_state);
23 void *addr;
24 size_t size;
25 while (hpdata_purge_next(ps, &purge_state, &addr, &size)) {
26 }
27 hpdata_purge_end(ps, &purge_state);
28 hpdata_alloc_allowed_set(ps, true);
29}
30
31static void
32test_psset_alloc_new(psset_t *psset, hpdata_t *ps, edata_t *r_edata,
33 size_t size) {
34 hpdata_assert_empty(ps);
35
36 test_psset_fake_purge(ps);
37
38 psset_insert(psset, ps);
39 psset_update_begin(psset, ps);
40
41 void *addr = hpdata_reserve_alloc(ps, size);
42 edata_init(r_edata, edata_arena_ind_get(r_edata), addr, size,
43 /* slab */ false, SC_NSIZES, /* sn */ 0, extent_state_active,
44 /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
45 EXTENT_NOT_HEAD);
46 edata_ps_set(r_edata, ps);
47 psset_update_end(psset, ps);
48}
49
50static bool
51test_psset_alloc_reuse(psset_t *psset, edata_t *r_edata, size_t size) {
52 hpdata_t *ps = psset_pick_alloc(psset, size);
53 if (ps == NULL) {
54 return true;
55 }
56 psset_update_begin(psset, ps);
57 void *addr = hpdata_reserve_alloc(ps, size);
58 edata_init(r_edata, edata_arena_ind_get(r_edata), addr, size,
59 /* slab */ false, SC_NSIZES, /* sn */ 0, extent_state_active,
60 /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
61 EXTENT_NOT_HEAD);
62 edata_ps_set(r_edata, ps);
63 psset_update_end(psset, ps);
64 return false;
65}
66
67static hpdata_t *
68test_psset_dalloc(psset_t *psset, edata_t *edata) {
69 hpdata_t *ps = edata_ps_get(edata);
70 psset_update_begin(psset, ps);
71 hpdata_unreserve(ps, edata_addr_get(edata), edata_size_get(edata));
72 psset_update_end(psset, ps);
73 if (hpdata_empty(ps)) {
74 psset_remove(psset, ps);
75 return ps;
76 } else {
77 return NULL;
78 }
79}
80
81static void
82edata_expect(edata_t *edata, size_t page_offset, size_t page_cnt) {
83 /*
84 * Note that allocations should get the arena ind of their home
85 * arena, *not* the arena ind of the pageslab allocator.
86 */
87 expect_u_eq(ALLOC_ARENA_IND, edata_arena_ind_get(edata),
88 "Arena ind changed");
89 expect_ptr_eq(
90 (void *)((uintptr_t)PAGESLAB_ADDR + (page_offset << LG_PAGE)),
91 edata_addr_get(edata), "Didn't allocate in order");
92 expect_zu_eq(page_cnt << LG_PAGE, edata_size_get(edata), "");
93 expect_false(edata_slab_get(edata), "");
94 expect_u_eq(SC_NSIZES, edata_szind_get_maybe_invalid(edata),
95 "");
96 expect_u64_eq(0, edata_sn_get(edata), "");
97 expect_d_eq(edata_state_get(edata), extent_state_active, "");
98 expect_false(edata_zeroed_get(edata), "");
99 expect_true(edata_committed_get(edata), "");
100 expect_d_eq(EXTENT_PAI_HPA, edata_pai_get(edata), "");
101 expect_false(edata_is_head_get(edata), "");
102}
103
104TEST_BEGIN(test_empty) {
105 bool err;
106 hpdata_t pageslab;
107 hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
108
109 edata_t alloc;
110 edata_init_test(&alloc);
111
112 psset_t psset;
113 psset_init(&psset);
114
115 /* Empty psset should return fail allocations. */
116 err = test_psset_alloc_reuse(&psset, &alloc, PAGE);
117 expect_true(err, "Empty psset succeeded in an allocation.");
118}
119TEST_END
120
121TEST_BEGIN(test_fill) {
122 bool err;
123
124 hpdata_t pageslab;
125 hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
126
127 edata_t alloc[HUGEPAGE_PAGES];
128
129 psset_t psset;
130 psset_init(&psset);
131
132 edata_init_test(&alloc[0]);
133 test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
134 for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
135 edata_init_test(&alloc[i]);
136 err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
137 expect_false(err, "Nonempty psset failed page allocation.");
138 }
139
140 for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
141 edata_t *edata = &alloc[i];
142 edata_expect(edata, i, 1);
143 }
144
145 /* The pageslab, and thus psset, should now have no allocations. */
146 edata_t extra_alloc;
147 edata_init_test(&extra_alloc);
148 err = test_psset_alloc_reuse(&psset, &extra_alloc, PAGE);
149 expect_true(err, "Alloc succeeded even though psset should be empty");
150}
151TEST_END
152
153TEST_BEGIN(test_reuse) {
154 bool err;
155 hpdata_t *ps;
156
157 hpdata_t pageslab;
158 hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
159
160 edata_t alloc[HUGEPAGE_PAGES];
161
162 psset_t psset;
163 psset_init(&psset);
164
165 edata_init_test(&alloc[0]);
166 test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
167 for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
168 edata_init_test(&alloc[i]);
169 err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
170 expect_false(err, "Nonempty psset failed page allocation.");
171 }
172
173 /* Free odd indices. */
174 for (size_t i = 0; i < HUGEPAGE_PAGES; i ++) {
175 if (i % 2 == 0) {
176 continue;
177 }
178 ps = test_psset_dalloc(&psset, &alloc[i]);
179 expect_ptr_null(ps, "Nonempty pageslab evicted");
180 }
181 /* Realloc into them. */
182 for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
183 if (i % 2 == 0) {
184 continue;
185 }
186 err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
187 expect_false(err, "Nonempty psset failed page allocation.");
188 edata_expect(&alloc[i], i, 1);
189 }
190 /* Now, free the pages at indices 0 or 1 mod 2. */
191 for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
192 if (i % 4 > 1) {
193 continue;
194 }
195 ps = test_psset_dalloc(&psset, &alloc[i]);
196 expect_ptr_null(ps, "Nonempty pageslab evicted");
197 }
198 /* And realloc 2-page allocations into them. */
199 for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
200 if (i % 4 != 0) {
201 continue;
202 }
203 err = test_psset_alloc_reuse(&psset, &alloc[i], 2 * PAGE);
204 expect_false(err, "Nonempty psset failed page allocation.");
205 edata_expect(&alloc[i], i, 2);
206 }
207 /* Free all the 2-page allocations. */
208 for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
209 if (i % 4 != 0) {
210 continue;
211 }
212 ps = test_psset_dalloc(&psset, &alloc[i]);
213 expect_ptr_null(ps, "Nonempty pageslab evicted");
214 }
215 /*
216 * Free up a 1-page hole next to a 2-page hole, but somewhere in the
217 * middle of the pageslab. Index 11 should be right before such a hole
218 * (since 12 % 4 == 0).
219 */
220 size_t index_of_3 = 11;
221 ps = test_psset_dalloc(&psset, &alloc[index_of_3]);
222 expect_ptr_null(ps, "Nonempty pageslab evicted");
223 err = test_psset_alloc_reuse(&psset, &alloc[index_of_3], 3 * PAGE);
224 expect_false(err, "Should have been able to find alloc.");
225 edata_expect(&alloc[index_of_3], index_of_3, 3);
226
227 /*
228 * Free up a 4-page hole at the end. Recall that the pages at offsets 0
229 * and 1 mod 4 were freed above, so we just have to free the last
230 * allocations.
231 */
232 ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
233 expect_ptr_null(ps, "Nonempty pageslab evicted");
234 ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 2]);
235 expect_ptr_null(ps, "Nonempty pageslab evicted");
236
237 /* Make sure we can satisfy an allocation at the very end of a slab. */
238 size_t index_of_4 = HUGEPAGE_PAGES - 4;
239 err = test_psset_alloc_reuse(&psset, &alloc[index_of_4], 4 * PAGE);
240 expect_false(err, "Should have been able to find alloc.");
241 edata_expect(&alloc[index_of_4], index_of_4, 4);
242}
243TEST_END
244
245TEST_BEGIN(test_evict) {
246 bool err;
247 hpdata_t *ps;
248
249 hpdata_t pageslab;
250 hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
251
252 edata_t alloc[HUGEPAGE_PAGES];
253
254 psset_t psset;
255 psset_init(&psset);
256
257 /* Alloc the whole slab. */
258 edata_init_test(&alloc[0]);
259 test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
260 for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
261 edata_init_test(&alloc[i]);
262 err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
263 expect_false(err, "Unxpected allocation failure");
264 }
265
266 /* Dealloc the whole slab, going forwards. */
267 for (size_t i = 0; i < HUGEPAGE_PAGES - 1; i++) {
268 ps = test_psset_dalloc(&psset, &alloc[i]);
269 expect_ptr_null(ps, "Nonempty pageslab evicted");
270 }
271 ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
272 expect_ptr_eq(&pageslab, ps, "Empty pageslab not evicted.");
273
274 err = test_psset_alloc_reuse(&psset, &alloc[0], PAGE);
275 expect_true(err, "psset should be empty.");
276}
277TEST_END
278
279TEST_BEGIN(test_multi_pageslab) {
280 bool err;
281 hpdata_t *ps;
282
283 hpdata_t pageslab[2];
284 hpdata_init(&pageslab[0], PAGESLAB_ADDR, PAGESLAB_AGE);
285 hpdata_init(&pageslab[1],
286 (void *)((uintptr_t)PAGESLAB_ADDR + HUGEPAGE),
287 PAGESLAB_AGE + 1);
288
289 edata_t alloc[2][HUGEPAGE_PAGES];
290
291 psset_t psset;
292 psset_init(&psset);
293
294 /* Insert both slabs. */
295 edata_init_test(&alloc[0][0]);
296 test_psset_alloc_new(&psset, &pageslab[0], &alloc[0][0], PAGE);
297 edata_init_test(&alloc[1][0]);
298 test_psset_alloc_new(&psset, &pageslab[1], &alloc[1][0], PAGE);
299
300 /* Fill them both up; make sure we do so in first-fit order. */
301 for (size_t i = 0; i < 2; i++) {
302 for (size_t j = 1; j < HUGEPAGE_PAGES; j++) {
303 edata_init_test(&alloc[i][j]);
304 err = test_psset_alloc_reuse(&psset, &alloc[i][j], PAGE);
305 expect_false(err,
306 "Nonempty psset failed page allocation.");
307 assert_ptr_eq(&pageslab[i], edata_ps_get(&alloc[i][j]),
308 "Didn't pick pageslabs in first-fit");
309 }
310 }
311
312 /*
313 * Free up a 2-page hole in the earlier slab, and a 1-page one in the
314 * later one. We should still pick the later one.
315 */
316 ps = test_psset_dalloc(&psset, &alloc[0][0]);
317 expect_ptr_null(ps, "Unexpected eviction");
318 ps = test_psset_dalloc(&psset, &alloc[0][1]);
319 expect_ptr_null(ps, "Unexpected eviction");
320 ps = test_psset_dalloc(&psset, &alloc[1][0]);
321 expect_ptr_null(ps, "Unexpected eviction");
322 err = test_psset_alloc_reuse(&psset, &alloc[0][0], PAGE);
323 expect_ptr_eq(&pageslab[1], edata_ps_get(&alloc[0][0]),
324 "Should have picked the fuller pageslab");
325
326 /*
327 * Now both slabs have 1-page holes. Free up a second one in the later
328 * slab.
329 */
330 ps = test_psset_dalloc(&psset, &alloc[1][1]);
331 expect_ptr_null(ps, "Unexpected eviction");
332
333 /*
334 * We should be able to allocate a 2-page object, even though an earlier
335 * size class is nonempty.
336 */
337 err = test_psset_alloc_reuse(&psset, &alloc[1][0], 2 * PAGE);
338 expect_false(err, "Allocation should have succeeded");
339}
340TEST_END
341
342static void
343stats_expect_empty(psset_bin_stats_t *stats) {
344 assert_zu_eq(0, stats->npageslabs,
345 "Supposedly empty bin had positive npageslabs");
346 expect_zu_eq(0, stats->nactive, "Unexpected nonempty bin"
347 "Supposedly empty bin had positive nactive");
348}
349
350static void
351stats_expect(psset_t *psset, size_t nactive) {
352 if (nactive == HUGEPAGE_PAGES) {
353 expect_zu_eq(1, psset->stats.full_slabs[0].npageslabs,
354 "Expected a full slab");
355 expect_zu_eq(HUGEPAGE_PAGES,
356 psset->stats.full_slabs[0].nactive,
357 "Should have exactly filled the bin");
358 } else {
359 stats_expect_empty(&psset->stats.full_slabs[0]);
360 }
361 size_t ninactive = HUGEPAGE_PAGES - nactive;
362 pszind_t nonempty_pind = PSSET_NPSIZES;
363 if (ninactive != 0 && ninactive < HUGEPAGE_PAGES) {
364 nonempty_pind = sz_psz2ind(sz_psz_quantize_floor(
365 ninactive << LG_PAGE));
366 }
367 for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
368 if (i == nonempty_pind) {
369 assert_zu_eq(1,
370 psset->stats.nonfull_slabs[i][0].npageslabs,
371 "Should have found a slab");
372 expect_zu_eq(nactive,
373 psset->stats.nonfull_slabs[i][0].nactive,
374 "Mismatch in active pages");
375 } else {
376 stats_expect_empty(&psset->stats.nonfull_slabs[i][0]);
377 }
378 }
379 expect_zu_eq(nactive, psset_nactive(psset), "");
380}
381
382TEST_BEGIN(test_stats) {
383 bool err;
384
385 hpdata_t pageslab;
386 hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
387
388 edata_t alloc[HUGEPAGE_PAGES];
389
390 psset_t psset;
391 psset_init(&psset);
392 stats_expect(&psset, 0);
393
394 edata_init_test(&alloc[0]);
395 test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
396 for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
397 stats_expect(&psset, i);
398 edata_init_test(&alloc[i]);
399 err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
400 expect_false(err, "Nonempty psset failed page allocation.");
401 }
402 stats_expect(&psset, HUGEPAGE_PAGES);
403 hpdata_t *ps;
404 for (ssize_t i = HUGEPAGE_PAGES - 1; i >= 0; i--) {
405 ps = test_psset_dalloc(&psset, &alloc[i]);
406 expect_true((ps == NULL) == (i != 0),
407 "test_psset_dalloc should only evict a slab on the last "
408 "free");
409 stats_expect(&psset, i);
410 }
411
412 test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
413 stats_expect(&psset, 1);
414 psset_update_begin(&psset, &pageslab);
415 stats_expect(&psset, 0);
416 psset_update_end(&psset, &pageslab);
417 stats_expect(&psset, 1);
418}
419TEST_END
420
421/*
422 * Fills in and inserts two pageslabs, with the first better than the second,
423 * and each fully allocated (into the allocations in allocs and worse_allocs,
424 * each of which should be HUGEPAGE_PAGES long), except for a single free page
425 * at the end.
426 *
427 * (There's nothing magic about these numbers; it's just useful to share the
428 * setup between the oldest fit and the insert/remove test).
429 */
430static void
431init_test_pageslabs(psset_t *psset, hpdata_t *pageslab,
432 hpdata_t *worse_pageslab, edata_t *alloc, edata_t *worse_alloc) {
433 bool err;
434
435 hpdata_init(pageslab, (void *)(10 * HUGEPAGE), PAGESLAB_AGE);
436 /*
437 * This pageslab would be better from an address-first-fit POV, but
438 * worse from an age POV.
439 */
440 hpdata_init(worse_pageslab, (void *)(9 * HUGEPAGE), PAGESLAB_AGE + 1);
441
442 psset_init(psset);
443
444 edata_init_test(&alloc[0]);
445 test_psset_alloc_new(psset, pageslab, &alloc[0], PAGE);
446 for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
447 edata_init_test(&alloc[i]);
448 err = test_psset_alloc_reuse(psset, &alloc[i], PAGE);
449 expect_false(err, "Nonempty psset failed page allocation.");
450 expect_ptr_eq(pageslab, edata_ps_get(&alloc[i]),
451 "Allocated from the wrong pageslab");
452 }
453
454 edata_init_test(&worse_alloc[0]);
455 test_psset_alloc_new(psset, worse_pageslab, &worse_alloc[0], PAGE);
456 expect_ptr_eq(worse_pageslab, edata_ps_get(&worse_alloc[0]),
457 "Allocated from the wrong pageslab");
458 /*
459 * Make the two pssets otherwise indistinguishable; all full except for
460 * a single page.
461 */
462 for (size_t i = 1; i < HUGEPAGE_PAGES - 1; i++) {
463 edata_init_test(&worse_alloc[i]);
464 err = test_psset_alloc_reuse(psset, &alloc[i], PAGE);
465 expect_false(err, "Nonempty psset failed page allocation.");
466 expect_ptr_eq(worse_pageslab, edata_ps_get(&alloc[i]),
467 "Allocated from the wrong pageslab");
468 }
469
470 /* Deallocate the last page from the older pageslab. */
471 hpdata_t *evicted = test_psset_dalloc(psset,
472 &alloc[HUGEPAGE_PAGES - 1]);
473 expect_ptr_null(evicted, "Unexpected eviction");
474}
475
476TEST_BEGIN(test_oldest_fit) {
477 bool err;
478 edata_t alloc[HUGEPAGE_PAGES];
479 edata_t worse_alloc[HUGEPAGE_PAGES];
480
481 hpdata_t pageslab;
482 hpdata_t worse_pageslab;
483
484 psset_t psset;
485
486 init_test_pageslabs(&psset, &pageslab, &worse_pageslab, alloc,
487 worse_alloc);
488
489 /* The edata should come from the better pageslab. */
490 edata_t test_edata;
491 edata_init_test(&test_edata);
492 err = test_psset_alloc_reuse(&psset, &test_edata, PAGE);
493 expect_false(err, "Nonempty psset failed page allocation");
494 expect_ptr_eq(&pageslab, edata_ps_get(&test_edata),
495 "Allocated from the wrong pageslab");
496}
497TEST_END
498
499TEST_BEGIN(test_insert_remove) {
500 bool err;
501 hpdata_t *ps;
502 edata_t alloc[HUGEPAGE_PAGES];
503 edata_t worse_alloc[HUGEPAGE_PAGES];
504
505 hpdata_t pageslab;
506 hpdata_t worse_pageslab;
507
508 psset_t psset;
509
510 init_test_pageslabs(&psset, &pageslab, &worse_pageslab, alloc,
511 worse_alloc);
512
513 /* Remove better; should still be able to alloc from worse. */
514 psset_update_begin(&psset, &pageslab);
515 err = test_psset_alloc_reuse(&psset, &worse_alloc[HUGEPAGE_PAGES - 1],
516 PAGE);
517 expect_false(err, "Removal should still leave an empty page");
518 expect_ptr_eq(&worse_pageslab,
519 edata_ps_get(&worse_alloc[HUGEPAGE_PAGES - 1]),
520 "Allocated out of wrong ps");
521
522 /*
523 * After deallocating the previous alloc and reinserting better, it
524 * should be preferred for future allocations.
525 */
526 ps = test_psset_dalloc(&psset, &worse_alloc[HUGEPAGE_PAGES - 1]);
527 expect_ptr_null(ps, "Incorrect eviction of nonempty pageslab");
528 psset_update_end(&psset, &pageslab);
529 err = test_psset_alloc_reuse(&psset, &alloc[HUGEPAGE_PAGES - 1], PAGE);
530 expect_false(err, "psset should be nonempty");
531 expect_ptr_eq(&pageslab, edata_ps_get(&alloc[HUGEPAGE_PAGES - 1]),
532 "Removal/reinsertion shouldn't change ordering");
533 /*
534 * After deallocating and removing both, allocations should fail.
535 */
536 ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
537 expect_ptr_null(ps, "Incorrect eviction");
538 psset_update_begin(&psset, &pageslab);
539 psset_update_begin(&psset, &worse_pageslab);
540 err = test_psset_alloc_reuse(&psset, &alloc[HUGEPAGE_PAGES - 1], PAGE);
541 expect_true(err, "psset should be empty, but an alloc succeeded");
542}
543TEST_END
544
545TEST_BEGIN(test_purge_prefers_nonhuge) {
546 /*
547 * All else being equal, we should prefer purging non-huge pages over
548 * huge ones for non-empty extents.
549 */
550
551 /* Nothing magic about this constant. */
552 enum {
553 NHP = 23,
554 };
555 hpdata_t *hpdata;
556
557 psset_t psset;
558 psset_init(&psset);
559
560 hpdata_t hpdata_huge[NHP];
561 uintptr_t huge_begin = (uintptr_t)&hpdata_huge[0];
562 uintptr_t huge_end = (uintptr_t)&hpdata_huge[NHP];
563 hpdata_t hpdata_nonhuge[NHP];
564 uintptr_t nonhuge_begin = (uintptr_t)&hpdata_nonhuge[0];
565 uintptr_t nonhuge_end = (uintptr_t)&hpdata_nonhuge[NHP];
566
567 for (size_t i = 0; i < NHP; i++) {
568 hpdata_init(&hpdata_huge[i], (void *)((10 + i) * HUGEPAGE),
569 123 + i);
570 psset_insert(&psset, &hpdata_huge[i]);
571
572 hpdata_init(&hpdata_nonhuge[i],
573 (void *)((10 + NHP + i) * HUGEPAGE),
574 456 + i);
575 psset_insert(&psset, &hpdata_nonhuge[i]);
576
577 }
578 for (int i = 0; i < 2 * NHP; i++) {
579 hpdata = psset_pick_alloc(&psset, HUGEPAGE * 3 / 4);
580 psset_update_begin(&psset, hpdata);
581 void *ptr;
582 ptr = hpdata_reserve_alloc(hpdata, HUGEPAGE * 3 / 4);
583 /* Ignore the first alloc, which will stick around. */
584 (void)ptr;
585 /*
586 * The second alloc is to dirty the pages; free it immediately
587 * after allocating.
588 */
589 ptr = hpdata_reserve_alloc(hpdata, HUGEPAGE / 4);
590 hpdata_unreserve(hpdata, ptr, HUGEPAGE / 4);
591
592 if (huge_begin <= (uintptr_t)hpdata
593 && (uintptr_t)hpdata < huge_end) {
594 hpdata_hugify(hpdata);
595 }
596
597 hpdata_purge_allowed_set(hpdata, true);
598 psset_update_end(&psset, hpdata);
599 }
600
601 /*
602 * We've got a bunch of 1/8th dirty hpdatas. It should give us all the
603 * non-huge ones to purge, then all the huge ones, then refuse to purge
604 * further.
605 */
606 for (int i = 0; i < NHP; i++) {
607 hpdata = psset_pick_purge(&psset);
608 assert_true(nonhuge_begin <= (uintptr_t)hpdata
609 && (uintptr_t)hpdata < nonhuge_end, "");
610 psset_update_begin(&psset, hpdata);
611 test_psset_fake_purge(hpdata);
612 hpdata_purge_allowed_set(hpdata, false);
613 psset_update_end(&psset, hpdata);
614 }
615 for (int i = 0; i < NHP; i++) {
616 hpdata = psset_pick_purge(&psset);
617 expect_true(huge_begin <= (uintptr_t)hpdata
618 && (uintptr_t)hpdata < huge_end, "");
619 psset_update_begin(&psset, hpdata);
620 hpdata_dehugify(hpdata);
621 test_psset_fake_purge(hpdata);
622 hpdata_purge_allowed_set(hpdata, false);
623 psset_update_end(&psset, hpdata);
624 }
625}
626TEST_END
627
628TEST_BEGIN(test_purge_prefers_empty) {
629 void *ptr;
630
631 psset_t psset;
632 psset_init(&psset);
633
634 hpdata_t hpdata_empty;
635 hpdata_t hpdata_nonempty;
636 hpdata_init(&hpdata_empty, (void *)(10 * HUGEPAGE), 123);
637 psset_insert(&psset, &hpdata_empty);
638 hpdata_init(&hpdata_nonempty, (void *)(11 * HUGEPAGE), 456);
639 psset_insert(&psset, &hpdata_nonempty);
640
641 psset_update_begin(&psset, &hpdata_empty);
642 ptr = hpdata_reserve_alloc(&hpdata_empty, PAGE);
643 expect_ptr_eq(hpdata_addr_get(&hpdata_empty), ptr, "");
644 hpdata_unreserve(&hpdata_empty, ptr, PAGE);
645 hpdata_purge_allowed_set(&hpdata_empty, true);
646 psset_update_end(&psset, &hpdata_empty);
647
648 psset_update_begin(&psset, &hpdata_nonempty);
649 ptr = hpdata_reserve_alloc(&hpdata_nonempty, 10 * PAGE);
650 expect_ptr_eq(hpdata_addr_get(&hpdata_nonempty), ptr, "");
651 hpdata_unreserve(&hpdata_nonempty, ptr, 9 * PAGE);
652 hpdata_purge_allowed_set(&hpdata_nonempty, true);
653 psset_update_end(&psset, &hpdata_nonempty);
654
655 /*
656 * The nonempty slab has 9 dirty pages, while the empty one has only 1.
657 * We should still pick the empty one for purging.
658 */
659 hpdata_t *to_purge = psset_pick_purge(&psset);
660 expect_ptr_eq(&hpdata_empty, to_purge, "");
661}
662TEST_END
663
664TEST_BEGIN(test_purge_prefers_empty_huge) {
665 void *ptr;
666
667 psset_t psset;
668 psset_init(&psset);
669
670 enum {NHP = 10 };
671
672 hpdata_t hpdata_huge[NHP];
673 hpdata_t hpdata_nonhuge[NHP];
674
675 uintptr_t cur_addr = 100 * HUGEPAGE;
676 uint64_t cur_age = 123;
677 for (int i = 0; i < NHP; i++) {
678 hpdata_init(&hpdata_huge[i], (void *)cur_addr, cur_age);
679 cur_addr += HUGEPAGE;
680 cur_age++;
681 psset_insert(&psset, &hpdata_huge[i]);
682
683 hpdata_init(&hpdata_nonhuge[i], (void *)cur_addr, cur_age);
684 cur_addr += HUGEPAGE;
685 cur_age++;
686 psset_insert(&psset, &hpdata_nonhuge[i]);
687
688 /*
689 * Make the hpdata_huge[i] fully dirty, empty, purgable, and
690 * huge.
691 */
692 psset_update_begin(&psset, &hpdata_huge[i]);
693 ptr = hpdata_reserve_alloc(&hpdata_huge[i], HUGEPAGE);
694 expect_ptr_eq(hpdata_addr_get(&hpdata_huge[i]), ptr, "");
695 hpdata_hugify(&hpdata_huge[i]);
696 hpdata_unreserve(&hpdata_huge[i], ptr, HUGEPAGE);
697 hpdata_purge_allowed_set(&hpdata_huge[i], true);
698 psset_update_end(&psset, &hpdata_huge[i]);
699
700 /*
701 * Make hpdata_nonhuge[i] fully dirty, empty, purgable, and
702 * non-huge.
703 */
704 psset_update_begin(&psset, &hpdata_nonhuge[i]);
705 ptr = hpdata_reserve_alloc(&hpdata_nonhuge[i], HUGEPAGE);
706 expect_ptr_eq(hpdata_addr_get(&hpdata_nonhuge[i]), ptr, "");
707 hpdata_unreserve(&hpdata_nonhuge[i], ptr, HUGEPAGE);
708 hpdata_purge_allowed_set(&hpdata_nonhuge[i], true);
709 psset_update_end(&psset, &hpdata_nonhuge[i]);
710 }
711
712 /*
713 * We have a bunch of empty slabs, half huge, half nonhuge, inserted in
714 * alternating order. We should pop all the huge ones before popping
715 * any of the non-huge ones for purging.
716 */
717 for (int i = 0; i < NHP; i++) {
718 hpdata_t *to_purge = psset_pick_purge(&psset);
719 expect_ptr_eq(&hpdata_huge[i], to_purge, "");
720 psset_update_begin(&psset, to_purge);
721 hpdata_purge_allowed_set(to_purge, false);
722 psset_update_end(&psset, to_purge);
723 }
724 for (int i = 0; i < NHP; i++) {
725 hpdata_t *to_purge = psset_pick_purge(&psset);
726 expect_ptr_eq(&hpdata_nonhuge[i], to_purge, "");
727 psset_update_begin(&psset, to_purge);
728 hpdata_purge_allowed_set(to_purge, false);
729 psset_update_end(&psset, to_purge);
730 }
731}
732TEST_END
733
734int
735main(void) {
736 return test_no_reentrancy(
737 test_empty,
738 test_fill,
739 test_reuse,
740 test_evict,
741 test_multi_pageslab,
742 test_stats,
743 test_oldest_fit,
744 test_insert_remove,
745 test_purge_prefers_nonhuge,
746 test_purge_prefers_empty,
747 test_purge_prefers_empty_huge);
748}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/ql.c b/examples/redis-unstable/deps/jemalloc/test/unit/ql.c
deleted file mode 100644
index f913058..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/ql.c
+++ /dev/null
@@ -1,317 +0,0 @@
#include "test/jemalloc_test.h"

#include "jemalloc/internal/ql.h"

/* Number of ring entries, in [2..26]. */
#define NENTRIES 9

typedef struct list_s list_t;
typedef ql_head(list_t) list_head_t;

/* Test element: intrusive linkage plus a one-letter identity ('a'+index). */
struct list_s {
	ql_elm(list_t) link;
	char id;
};
15
16static void
17test_empty_list(list_head_t *head) {
18 list_t *t;
19 unsigned i;
20
21 expect_true(ql_empty(head), "Unexpected element for empty list");
22 expect_ptr_null(ql_first(head), "Unexpected element for empty list");
23 expect_ptr_null(ql_last(head, link),
24 "Unexpected element for empty list");
25
26 i = 0;
27 ql_foreach(t, head, link) {
28 i++;
29 }
30 expect_u_eq(i, 0, "Unexpected element for empty list");
31
32 i = 0;
33 ql_reverse_foreach(t, head, link) {
34 i++;
35 }
36 expect_u_eq(i, 0, "Unexpected element for empty list");
37}
38
/* A freshly initialized list must pass all emptiness checks. */
TEST_BEGIN(test_ql_empty) {
	list_head_t head;

	ql_new(&head);
	test_empty_list(&head);
}
TEST_END
46
47static void
48init_entries(list_t *entries, unsigned nentries) {
49 unsigned i;
50
51 for (i = 0; i < nentries; i++) {
52 entries[i].id = 'a' + i;
53 ql_elm_new(&entries[i], link);
54 }
55}
56
/*
 * Exhaustively check a list that should contain entries[0..nentries-1] in
 * array order: endpoints, forward and reverse iteration, and per-element
 * next/prev navigation.
 */
static void
test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) {
	list_t *t;
	unsigned i;

	expect_false(ql_empty(head), "List should not be empty");
	expect_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch");
	expect_c_eq(ql_last(head, link)->id, entries[nentries-1].id,
	    "Element id mismatch");

	/* Forward iteration visits entries in index order. */
	i = 0;
	ql_foreach(t, head, link) {
		expect_c_eq(t->id, entries[i].id, "Element id mismatch");
		i++;
	}

	/* Reverse iteration visits them back to front. */
	i = 0;
	ql_reverse_foreach(t, head, link) {
		expect_c_eq(t->id, entries[nentries-i-1].id,
		    "Element id mismatch");
		i++;
	}

	/* ql_next() steps forward; NULL past the tail. */
	for (i = 0; i < nentries-1; i++) {
		t = ql_next(head, &entries[i], link);
		expect_c_eq(t->id, entries[i+1].id, "Element id mismatch");
	}
	expect_ptr_null(ql_next(head, &entries[nentries-1], link),
	    "Unexpected element");

	/* ql_prev() steps backward; NULL before the head. */
	expect_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element");
	for (i = 1; i < nentries; i++) {
		t = ql_prev(head, &entries[i], link);
		expect_c_eq(t->id, entries[i-1].id, "Element id mismatch");
	}
}
93
94TEST_BEGIN(test_ql_tail_insert) {
95 list_head_t head;
96 list_t entries[NENTRIES];
97 unsigned i;
98
99 ql_new(&head);
100 init_entries(entries, sizeof(entries)/sizeof(list_t));
101 for (i = 0; i < NENTRIES; i++) {
102 ql_tail_insert(&head, &entries[i], link);
103 }
104
105 test_entries_list(&head, entries, NENTRIES);
106}
107TEST_END
108
/* Repeatedly remove from the tail; the remaining prefix must stay intact. */
TEST_BEGIN(test_ql_tail_remove) {
	list_head_t head;
	list_t entries[NENTRIES];
	unsigned i;

	ql_new(&head);
	init_entries(entries, sizeof(entries)/sizeof(list_t));
	for (i = 0; i < NENTRIES; i++) {
		ql_tail_insert(&head, &entries[i], link);
	}

	for (i = 0; i < NENTRIES; i++) {
		/* Before the i-th removal, entries[0..NENTRIES-i-1] remain. */
		test_entries_list(&head, entries, NENTRIES-i);
		ql_tail_remove(&head, list_t, link);
	}
	test_empty_list(&head);
}
TEST_END
127
128TEST_BEGIN(test_ql_head_insert) {
129 list_head_t head;
130 list_t entries[NENTRIES];
131 unsigned i;
132
133 ql_new(&head);
134 init_entries(entries, sizeof(entries)/sizeof(list_t));
135 for (i = 0; i < NENTRIES; i++) {
136 ql_head_insert(&head, &entries[NENTRIES-i-1], link);
137 }
138
139 test_entries_list(&head, entries, NENTRIES);
140}
141TEST_END
142
/* Repeatedly remove from the head; the remaining suffix must stay intact. */
TEST_BEGIN(test_ql_head_remove) {
	list_head_t head;
	list_t entries[NENTRIES];
	unsigned i;

	ql_new(&head);
	init_entries(entries, sizeof(entries)/sizeof(list_t));
	for (i = 0; i < NENTRIES; i++) {
		ql_head_insert(&head, &entries[NENTRIES-i-1], link);
	}

	for (i = 0; i < NENTRIES; i++) {
		/* After i removals the list holds entries[i..NENTRIES-1]. */
		test_entries_list(&head, &entries[i], NENTRIES-i);
		ql_head_remove(&head, list_t, link);
	}
	test_empty_list(&head);
}
TEST_END
161
/*
 * Exercise middle-of-list insertion; the sequence below assembles the
 * list a, b, c, d, e, f, g, h via before/after inserts.
 */
TEST_BEGIN(test_ql_insert) {
	list_head_t head;
	list_t entries[8];
	list_t *a, *b, *c, *d, *e, *f, *g, *h;

	ql_new(&head);
	init_entries(entries, sizeof(entries)/sizeof(list_t));
	a = &entries[0];
	b = &entries[1];
	c = &entries[2];
	d = &entries[3];
	e = &entries[4];
	f = &entries[5];
	g = &entries[6];
	h = &entries[7];

	/*
	 * ql_remove(), ql_before_insert(), and ql_after_insert() are used
	 * internally by other macros that are already tested, so there's no
	 * need to test them completely. However, insertion/deletion from the
	 * middle of lists is not otherwise tested; do so here.
	 */
	ql_tail_insert(&head, f, link);
	ql_before_insert(&head, f, b, link);
	ql_before_insert(&head, f, c, link);
	ql_after_insert(f, h, link);
	ql_after_insert(f, g, link);
	ql_before_insert(&head, b, a, link);
	ql_after_insert(c, d, link);
	ql_before_insert(&head, f, e, link);

	test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t));
}
TEST_END
196
/*
 * Build two lists holding entries[0..nentries_a) and
 * entries[nentries_a..nentries_a+nentries_b), concatenate them into one,
 * then split them back apart at the join point, verifying contents at
 * every stage.
 */
static void
test_concat_split_entries(list_t *entries, unsigned nentries_a,
    unsigned nentries_b) {
	init_entries(entries, nentries_a + nentries_b);

	/* List A: the first nentries_a entries. */
	list_head_t head_a;
	ql_new(&head_a);
	for (unsigned i = 0; i < nentries_a; i++) {
		ql_tail_insert(&head_a, &entries[i], link);
	}
	if (nentries_a == 0) {
		test_empty_list(&head_a);
	} else {
		test_entries_list(&head_a, entries, nentries_a);
	}

	/* List B: the remaining nentries_b entries. */
	list_head_t head_b;
	ql_new(&head_b);
	for (unsigned i = 0; i < nentries_b; i++) {
		ql_tail_insert(&head_b, &entries[nentries_a + i], link);
	}
	if (nentries_b == 0) {
		test_empty_list(&head_b);
	} else {
		test_entries_list(&head_b, entries + nentries_a, nentries_b);
	}

	/* Concatenation moves all of B onto the tail of A, emptying B. */
	ql_concat(&head_a, &head_b, link);
	if (nentries_a + nentries_b == 0) {
		test_empty_list(&head_a);
	} else {
		test_entries_list(&head_a, entries, nentries_a + nentries_b);
	}
	test_empty_list(&head_b);

	/* Splitting needs a valid split element, so stop if B was empty. */
	if (nentries_b == 0) {
		return;
	}

	/* Split at what was B's first element; both halves are restored. */
	list_head_t head_c;
	ql_split(&head_a, &entries[nentries_a], &head_c, link);
	if (nentries_a == 0) {
		test_empty_list(&head_a);
	} else {
		test_entries_list(&head_a, entries, nentries_a);
	}
	test_entries_list(&head_c, entries + nentries_a, nentries_b);
}
245
/*
 * Exercise concat/split across the interesting size combinations: both
 * halves empty, one empty, singleton halves, and several splits of
 * NENTRIES.
 */
TEST_BEGIN(test_ql_concat_split) {
	list_t entries[NENTRIES];

	test_concat_split_entries(entries, 0, 0);

	test_concat_split_entries(entries, 0, 1);
	test_concat_split_entries(entries, 1, 0);

	test_concat_split_entries(entries, 0, NENTRIES);
	test_concat_split_entries(entries, 1, NENTRIES - 1);
	test_concat_split_entries(entries, NENTRIES / 2,
	    NENTRIES - NENTRIES / 2);
	test_concat_split_entries(entries, NENTRIES - 1, 1);
	test_concat_split_entries(entries, NENTRIES, 0);
}
TEST_END
262
/*
 * Rotate the list NENTRIES times; each rotation moves the current head to
 * the tail, and a full cycle restores the original order. Since ids are
 * consecutive letters, the expected head id simply increments.
 */
TEST_BEGIN(test_ql_rotate) {
	list_head_t head;
	list_t entries[NENTRIES];
	unsigned i;

	ql_new(&head);
	init_entries(entries, sizeof(entries)/sizeof(list_t));
	for (i = 0; i < NENTRIES; i++) {
		ql_tail_insert(&head, &entries[i], link);
	}

	char head_id = ql_first(&head)->id;
	for (i = 0; i < NENTRIES; i++) {
		assert_c_eq(ql_first(&head)->id, head_id, "");
		ql_rotate(&head, link);
		/* The old head must now be the tail. */
		assert_c_eq(ql_last(&head, link)->id, head_id, "");
		head_id++;
	}
	test_entries_list(&head, entries, NENTRIES);
}
TEST_END
284
/*
 * ql_move() transfers all elements from src to dest, leaving src empty;
 * this must also work when src starts out empty.
 */
TEST_BEGIN(test_ql_move) {
	list_head_t head_dest, head_src;
	list_t entries[NENTRIES];
	unsigned i;

	/* Moving an empty list yields two empty lists. */
	ql_new(&head_src);
	ql_move(&head_dest, &head_src);
	test_empty_list(&head_src);
	test_empty_list(&head_dest);

	/* Moving a populated list empties the source. */
	init_entries(entries, sizeof(entries)/sizeof(list_t));
	for (i = 0; i < NENTRIES; i++) {
		ql_tail_insert(&head_src, &entries[i], link);
	}
	ql_move(&head_dest, &head_src);
	test_empty_list(&head_src);
	test_entries_list(&head_dest, entries, NENTRIES);
}
TEST_END
304
/* Test driver for the ql (intrusive list) macros. */
int
main(void) {
	return test(
	    test_ql_empty,
	    test_ql_tail_insert,
	    test_ql_tail_remove,
	    test_ql_head_insert,
	    test_ql_head_remove,
	    test_ql_insert,
	    test_ql_concat_split,
	    test_ql_rotate,
	    test_ql_move);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/qr.c b/examples/redis-unstable/deps/jemalloc/test/unit/qr.c
deleted file mode 100644
index 16eed0e..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/qr.c
+++ /dev/null
@@ -1,243 +0,0 @@
#include "test/jemalloc_test.h"

#include "jemalloc/internal/qr.h"

/* Number of ring entries, in [2..26]. */
#define NENTRIES 9
/* Split index, in [1..NENTRIES). */
#define SPLIT_INDEX 5

typedef struct ring_s ring_t;

/* Test element: intrusive ring linkage plus a one-letter id ('a'+index). */
struct ring_s {
	qr(ring_t) link;
	char id;
};
16
17static void
18init_entries(ring_t *entries) {
19 unsigned i;
20
21 for (i = 0; i < NENTRIES; i++) {
22 qr_new(&entries[i], link);
23 entries[i].id = 'a' + i;
24 }
25}
26
/*
 * Verify that each entry is a singleton ring: iteration in either
 * direction visits exactly one element, and next/prev are self-loops.
 */
static void
test_independent_entries(ring_t *entries) {
	ring_t *t;
	unsigned i, j;

	for (i = 0; i < NENTRIES; i++) {
		j = 0;
		qr_foreach(t, &entries[i], link) {
			j++;
		}
		expect_u_eq(j, 1,
		    "Iteration over single-element ring should visit precisely "
		    "one element");
	}
	for (i = 0; i < NENTRIES; i++) {
		j = 0;
		qr_reverse_foreach(t, &entries[i], link) {
			j++;
		}
		expect_u_eq(j, 1,
		    "Iteration over single-element ring should visit precisely "
		    "one element");
	}
	for (i = 0; i < NENTRIES; i++) {
		t = qr_next(&entries[i], link);
		expect_ptr_eq(t, &entries[i],
		    "Next element in single-element ring should be same as "
		    "current element");
	}
	for (i = 0; i < NENTRIES; i++) {
		t = qr_prev(&entries[i], link);
		expect_ptr_eq(t, &entries[i],
		    "Previous element in single-element ring should be same as "
		    "current element");
	}
}
63
/* Freshly initialized entries form NENTRIES independent singleton rings. */
TEST_BEGIN(test_qr_one) {
	ring_t entries[NENTRIES];

	init_entries(entries);
	test_independent_entries(entries);
}
TEST_END
71
/*
 * Verify a single ring containing all entries in array order: starting
 * from any element, forward/reverse traversal and next/prev wrap around
 * modulo NENTRIES.
 */
static void
test_entries_ring(ring_t *entries) {
	ring_t *t;
	unsigned i, j;

	for (i = 0; i < NENTRIES; i++) {
		j = 0;
		qr_foreach(t, &entries[i], link) {
			expect_c_eq(t->id, entries[(i+j) % NENTRIES].id,
			    "Element id mismatch");
			j++;
		}
	}
	for (i = 0; i < NENTRIES; i++) {
		j = 0;
		qr_reverse_foreach(t, &entries[i], link) {
			expect_c_eq(t->id, entries[(NENTRIES+i-j-1) %
			    NENTRIES].id, "Element id mismatch");
			j++;
		}
	}
	for (i = 0; i < NENTRIES; i++) {
		t = qr_next(&entries[i], link);
		expect_c_eq(t->id, entries[(i+1) % NENTRIES].id,
		    "Element id mismatch");
	}
	for (i = 0; i < NENTRIES; i++) {
		t = qr_prev(&entries[i], link);
		expect_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
		    "Element id mismatch");
	}
}
104
105TEST_BEGIN(test_qr_after_insert) {
106 ring_t entries[NENTRIES];
107 unsigned i;
108
109 init_entries(entries);
110 for (i = 1; i < NENTRIES; i++) {
111 qr_after_insert(&entries[i - 1], &entries[i], link);
112 }
113 test_entries_ring(entries);
114}
115TEST_END
116
/*
 * Build a full ring, then peel entries off one at a time, checking the
 * remaining ring's contents before each removal; afterwards all entries
 * are independent singleton rings again.
 */
TEST_BEGIN(test_qr_remove) {
	ring_t entries[NENTRIES];
	ring_t *t;
	unsigned i, j;

	init_entries(entries);
	for (i = 1; i < NENTRIES; i++) {
		qr_after_insert(&entries[i - 1], &entries[i], link);
	}

	for (i = 0; i < NENTRIES; i++) {
		/* Ring currently holds entries[i..NENTRIES-1]. */
		j = 0;
		qr_foreach(t, &entries[i], link) {
			expect_c_eq(t->id, entries[i+j].id,
			    "Element id mismatch");
			j++;
		}
		j = 0;
		qr_reverse_foreach(t, &entries[i], link) {
			expect_c_eq(t->id, entries[NENTRIES - 1 - j].id,
			    "Element id mismatch");
			j++;
		}
		qr_remove(&entries[i], link);
	}
	test_independent_entries(entries);
}
TEST_END
145
/*
 * Build a ring with qr_before_insert(); the resulting ring order is the
 * reverse of the array order, which the modular index arithmetic in the
 * checks below accounts for.
 */
TEST_BEGIN(test_qr_before_insert) {
	ring_t entries[NENTRIES];
	ring_t *t;
	unsigned i, j;

	init_entries(entries);
	for (i = 1; i < NENTRIES; i++) {
		qr_before_insert(&entries[i - 1], &entries[i], link);
	}
	for (i = 0; i < NENTRIES; i++) {
		j = 0;
		qr_foreach(t, &entries[i], link) {
			expect_c_eq(t->id, entries[(NENTRIES+i-j) %
			    NENTRIES].id, "Element id mismatch");
			j++;
		}
	}
	for (i = 0; i < NENTRIES; i++) {
		j = 0;
		qr_reverse_foreach(t, &entries[i], link) {
			expect_c_eq(t->id, entries[(i+j+1) % NENTRIES].id,
			    "Element id mismatch");
			j++;
		}
	}
	for (i = 0; i < NENTRIES; i++) {
		t = qr_next(&entries[i], link);
		expect_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
		    "Element id mismatch");
	}
	for (i = 0; i < NENTRIES; i++) {
		t = qr_prev(&entries[i], link);
		expect_c_eq(t->id, entries[(i+1) % NENTRIES].id,
		    "Element id mismatch");
	}
}
TEST_END
183
/*
 * Verify the state after splitting the full ring at SPLIT_INDEX: indices
 * [0, SPLIT_INDEX) form one ring and [SPLIT_INDEX, NENTRIES) the other,
 * each preserving relative order.
 */
static void
test_split_entries(ring_t *entries) {
	ring_t *t;
	unsigned i, j;

	for (i = 0; i < NENTRIES; i++) {
		j = 0;
		qr_foreach(t, &entries[i], link) {
			if (i < SPLIT_INDEX) {
				expect_c_eq(t->id,
				    entries[(i+j) % SPLIT_INDEX].id,
				    "Element id mismatch");
			} else {
				expect_c_eq(t->id, entries[(i+j-SPLIT_INDEX) %
				    (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id,
				    "Element id mismatch");
			}
			j++;
		}
	}
}
205
/*
 * Exercise qr_meld()/qr_split(). As the checks below demonstrate, the
 * operations act as mutual inverses on rings: a meld undoes a split and
 * vice versa, and applying either one twice round-trips. Splitting or
 * melding a ring with itself is a no-op.
 */
TEST_BEGIN(test_qr_meld_split) {
	ring_t entries[NENTRIES];
	unsigned i;

	init_entries(entries);
	for (i = 1; i < NENTRIES; i++) {
		qr_after_insert(&entries[i - 1], &entries[i], link);
	}

	/* Split the full ring in two, then meld it back together. */
	qr_split(&entries[0], &entries[SPLIT_INDEX], link);
	test_split_entries(entries);

	qr_meld(&entries[0], &entries[SPLIT_INDEX], link);
	test_entries_ring(entries);

	/* A second meld re-separates the rings; a split re-joins them. */
	qr_meld(&entries[0], &entries[SPLIT_INDEX], link);
	test_split_entries(entries);

	qr_split(&entries[0], &entries[SPLIT_INDEX], link);
	test_entries_ring(entries);

	/* Self-split and self-meld leave the ring unchanged. */
	qr_split(&entries[0], &entries[0], link);
	test_entries_ring(entries);

	qr_meld(&entries[0], &entries[0], link);
	test_entries_ring(entries);
}
TEST_END
234
/* Test driver for the qr (intrusive ring) macros. */
int
main(void) {
	return test(
	    test_qr_one,
	    test_qr_after_insert,
	    test_qr_remove,
	    test_qr_before_insert,
	    test_qr_meld_split);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/rb.c b/examples/redis-unstable/deps/jemalloc/test/unit/rb.c
deleted file mode 100644
index 827ec51..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/rb.c
+++ /dev/null
@@ -1,1019 +0,0 @@
#include "test/jemalloc_test.h"

#include <stdlib.h>

#include "jemalloc/internal/rb.h"

/*
 * Compute the tree's black height into r_height by walking the leftmost
 * path and counting black nodes.
 */
#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do {	\
    a_type *rbp_bh_t;							\
    for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; rbp_bh_t !=	\
	NULL; rbp_bh_t = rbtn_left_get(a_type, a_field,			\
	rbp_bh_t)) {							\
	if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) {			\
	    (r_height)++;						\
	}								\
    }									\
} while (0)

/*
 * When true, node_summarize() reports a change even when nothing actually
 * changed, forcing summary updates to propagate all the way to the root.
 */
static bool summarize_always_returns_true = false;

typedef struct node_s node_t;
struct node_s {
#define NODE_MAGIC 0x9823af7e
	/* Validity marker; cleared on removal to catch stale references. */
	uint32_t magic;
	rb_node(node_t) link;
	/* Order used by nodes. */
	uint64_t key;
	/*
	 * Our made-up summary property is "specialness", with summarization
	 * taking the max.
	 */
	uint64_t specialness;

	/*
	 * Used by some of the test randomization to avoid double-removing
	 * nodes.
	 */
	bool mid_remove;

	/*
	 * To test searching functionality, we want to temporarily weaken the
	 * ordering to allow non-equal nodes that nevertheless compare equal.
	 */
	bool allow_duplicates;

	/*
	 * In check_consistency, it's handy to know a node's rank in the tree;
	 * this tracks it (but only there; not all tests use this).
	 */
	int rank;
	int filtered_rank;

	/*
	 * Replicate the internal structure of the tree, to make sure the
	 * implementation doesn't miss any updates.
	 */
	const node_t *summary_lchild;
	const node_t *summary_rchild;
	uint64_t summary_max_specialness;
};
60
/*
 * Total order on nodes: primary key is ->key; ties are broken by node
 * address unless the node opts into duplicate-tolerant comparison.
 */
static int
node_cmp(const node_t *a, const node_t *b) {
	int ret;

	expect_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
	expect_u32_eq(b->magic, NODE_MAGIC, "Bad magic");

	/* Branchless three-way comparison of the keys. */
	ret = (a->key > b->key) - (a->key < b->key);
	if (ret == 0 && !a->allow_duplicates) {
		/*
		 * Duplicates are not allowed in the tree, so force an
		 * arbitrary ordering for non-identical items with equal keys,
		 * unless the user is searching and wants to allow the
		 * duplicate.
		 */
		ret = (((uintptr_t)a) > ((uintptr_t)b))
		    - (((uintptr_t)a) < ((uintptr_t)b));
	}
	return ret;
}
81
82static uint64_t
83node_subtree_specialness(node_t *n, const node_t *lchild,
84 const node_t *rchild) {
85 uint64_t subtree_specialness = n->specialness;
86 if (lchild != NULL
87 && lchild->summary_max_specialness > subtree_specialness) {
88 subtree_specialness = lchild->summary_max_specialness;
89 }
90 if (rchild != NULL
91 && rchild->summary_max_specialness > subtree_specialness) {
92 subtree_specialness = rchild->summary_max_specialness;
93 }
94 return subtree_specialness;
95}
96
/*
 * Summary callback for the tree: recompute the cached max specialness and
 * mirrored child pointers, and report whether anything changed (or
 * unconditionally report a change when the test forces full propagation).
 */
static bool
node_summarize(node_t *a, const node_t *lchild, const node_t *rchild) {
	uint64_t new_summary_max_specialness = node_subtree_specialness(
	    a, lchild, rchild);
	bool changed = (a->summary_lchild != lchild)
	    || (a->summary_rchild != rchild)
	    || (new_summary_max_specialness != a->summary_max_specialness);
	a->summary_max_specialness = new_summary_max_specialness;
	a->summary_lchild = lchild;
	a->summary_rchild = rchild;
	return changed || summarize_always_returns_true;
}
109
/* Generate a summarized red-black tree of node_t, ordered by node_cmp(). */
typedef rb_tree(node_t) tree_t;
rb_summarized_proto(static, tree_, tree_t, node_t);
rb_summarized_gen(static, tree_, tree_t, node_t, link, node_cmp,
    node_summarize);
114
115static bool
116specialness_filter_node(void *ctx, node_t *node) {
117 uint64_t specialness = *(uint64_t *)ctx;
118 return node->specialness >= specialness;
119}
120
121static bool
122specialness_filter_subtree(void *ctx, node_t *node) {
123 uint64_t specialness = *(uint64_t *)ctx;
124 return node->summary_max_specialness >= specialness;
125}
126
/*
 * Per-node iteration callback: every visited node must be found by all
 * three search variants; *data counts visits. Returning NULL continues
 * the iteration.
 */
static node_t *
tree_iterate_cb(tree_t *tree, node_t *node, void *data) {
	unsigned *i = (unsigned *)data;
	node_t *search_node;

	expect_u32_eq(node->magic, NODE_MAGIC, "Bad magic");

	/* Test rb_search(). */
	search_node = tree_search(tree, node);
	expect_ptr_eq(search_node, node,
	    "tree_search() returned unexpected node");

	/* Test rb_nsearch(). */
	search_node = tree_nsearch(tree, node);
	expect_ptr_eq(search_node, node,
	    "tree_nsearch() returned unexpected node");

	/* Test rb_psearch(). */
	search_node = tree_psearch(tree, node);
	expect_ptr_eq(search_node, node,
	    "tree_psearch() returned unexpected node");

	(*i)++;

	return NULL;
}
153
/*
 * All queries on an empty tree — plain and filtered — must report
 * emptiness or return NULL.
 */
TEST_BEGIN(test_rb_empty) {
	tree_t tree;
	node_t key;

	tree_new(&tree);

	expect_true(tree_empty(&tree), "Tree should be empty");
	expect_ptr_null(tree_first(&tree), "Unexpected node");
	expect_ptr_null(tree_last(&tree), "Unexpected node");

	key.key = 0;
	key.magic = NODE_MAGIC;
	expect_ptr_null(tree_search(&tree, &key), "Unexpected node");

	key.key = 0;
	key.magic = NODE_MAGIC;
	expect_ptr_null(tree_nsearch(&tree, &key), "Unexpected node");

	key.key = 0;
	key.magic = NODE_MAGIC;
	expect_ptr_null(tree_psearch(&tree, &key), "Unexpected node");

	/* Filtered iteration must visit nothing. */
	unsigned nodes = 0;
	tree_iter_filtered(&tree, NULL, &tree_iterate_cb,
	    &nodes, &specialness_filter_node, &specialness_filter_subtree,
	    NULL);
	expect_u_eq(0, nodes, "");

	nodes = 0;
	tree_reverse_iter_filtered(&tree, NULL, &tree_iterate_cb,
	    &nodes, &specialness_filter_node, &specialness_filter_subtree,
	    NULL);
	expect_u_eq(0, nodes, "");

	expect_ptr_null(tree_first_filtered(&tree, &specialness_filter_node,
	    &specialness_filter_subtree, NULL), "");
	expect_ptr_null(tree_last_filtered(&tree, &specialness_filter_node,
	    &specialness_filter_subtree, NULL), "");

	key.key = 0;
	key.magic = NODE_MAGIC;
	expect_ptr_null(tree_search_filtered(&tree, &key,
	    &specialness_filter_node, &specialness_filter_subtree, NULL), "");
	expect_ptr_null(tree_nsearch_filtered(&tree, &key,
	    &specialness_filter_node, &specialness_filter_subtree, NULL), "");
	expect_ptr_null(tree_psearch_filtered(&tree, &key,
	    &specialness_filter_node, &specialness_filter_subtree, NULL), "");
}
TEST_END
203
/*
 * Recursively validate subtree invariants: the summary caches match the
 * real child pointers and specialness maxima, red nodes have only black
 * children, and every root-to-leaf path carries the expected number of
 * black nodes. Returns the number of black-height violations found.
 */
static unsigned
tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) {
	unsigned ret = 0;
	node_t *left_node;
	node_t *right_node;

	if (node == NULL) {
		return ret;
	}

	left_node = rbtn_left_get(node_t, link, node);
	right_node = rbtn_right_get(node_t, link, node);

	expect_ptr_eq(left_node, node->summary_lchild,
	    "summary missed a tree update");
	expect_ptr_eq(right_node, node->summary_rchild,
	    "summary missed a tree update");

	uint64_t expected_subtree_specialness = node_subtree_specialness(node,
	    left_node, right_node);
	expect_u64_eq(expected_subtree_specialness,
	    node->summary_max_specialness, "Incorrect summary");

	if (!rbtn_red_get(node_t, link, node)) {
		black_depth++;
	}

	/* Red nodes must be interleaved with black nodes. */
	if (rbtn_red_get(node_t, link, node)) {
		if (left_node != NULL) {
			expect_false(rbtn_red_get(node_t, link, left_node),
			    "Node should be black");
		}
		if (right_node != NULL) {
			expect_false(rbtn_red_get(node_t, link, right_node),
			    "Node should be black");
		}
	}

	/* Self. */
	expect_u32_eq(node->magic, NODE_MAGIC, "Bad magic");

	/* Left subtree. */
	if (left_node != NULL) {
		ret += tree_recurse(left_node, black_height, black_depth);
	} else {
		/* NULL leaf: count a violation if the black depth is off. */
		ret += (black_depth != black_height);
	}

	/* Right subtree. */
	if (right_node != NULL) {
		ret += tree_recurse(right_node, black_height, black_depth);
	} else {
		ret += (black_depth != black_height);
	}

	return ret;
}
262
263static unsigned
264tree_iterate(tree_t *tree) {
265 unsigned i;
266
267 i = 0;
268 tree_iter(tree, NULL, tree_iterate_cb, (void *)&i);
269
270 return i;
271}
272
273static unsigned
274tree_iterate_reverse(tree_t *tree) {
275 unsigned i;
276
277 i = 0;
278 tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i);
279
280 return i;
281}
282
/*
 * Remove node from a tree currently holding nnodes nodes, then validate
 * neighbor searches, rebalancing, and iteration counts on the remainder.
 */
static void
node_remove(tree_t *tree, node_t *node, unsigned nnodes) {
	node_t *search_node;
	unsigned black_height, imbalances;

	tree_remove(tree, node);

	/* Test rb_nsearch(). */
	search_node = tree_nsearch(tree, node);
	if (search_node != NULL) {
		expect_u64_ge(search_node->key, node->key,
		    "Key ordering error");
	}

	/* Test rb_psearch(). */
	search_node = tree_psearch(tree, node);
	if (search_node != NULL) {
		expect_u64_le(search_node->key, node->key,
		    "Key ordering error");
	}

	/* Poison the magic so stale references to this node are caught. */
	node->magic = 0;

	rbtn_black_height(node_t, link, tree, black_height);
	imbalances = tree_recurse(tree->rbt_root, black_height, 0);
	expect_u_eq(imbalances, 0, "Tree is unbalanced");
	expect_u_eq(tree_iterate(tree), nnodes-1,
	    "Unexpected node iteration count");
	expect_u_eq(tree_iterate_reverse(tree), nnodes-1,
	    "Unexpected node iteration count");
}
314
315static node_t *
316remove_iterate_cb(tree_t *tree, node_t *node, void *data) {
317 unsigned *nnodes = (unsigned *)data;
318 node_t *ret = tree_next(tree, node);
319
320 node_remove(tree, node, *nnodes);
321
322 return ret;
323}
324
325static node_t *
326remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) {
327 unsigned *nnodes = (unsigned *)data;
328 node_t *ret = tree_prev(tree, node);
329
330 node_remove(tree, node, *nnodes);
331
332 return ret;
333}
334
/* tree_destroy() callback: count down the number of nodes destroyed. */
static void
destroy_cb(node_t *node, void *data) {
	unsigned *nnodes = (unsigned *)data;

	expect_u_gt(*nnodes, 0, "Destruction removed too many nodes");
	(*nnodes)--;
}
342
/*
 * Randomized stress test: draw key "bags" (sorted, reverse-sorted, or
 * random with likely duplicates), insert prefixes of every length, and
 * remove via five different strategies, validating tree invariants after
 * each mutation.
 */
TEST_BEGIN(test_rb_random) {
	enum {
		NNODES = 25,
		NBAGS = 500,
		SEED = 42
	};
	sfmt_t *sfmt;
	uint64_t bag[NNODES];
	tree_t tree;
	node_t nodes[NNODES];
	unsigned i, j, k, black_height, imbalances;

	sfmt = init_gen_rand(SEED);
	for (i = 0; i < NBAGS; i++) {
		switch (i) {
		case 0:
			/* Insert in order. */
			for (j = 0; j < NNODES; j++) {
				bag[j] = j;
			}
			break;
		case 1:
			/* Insert in reverse order. */
			for (j = 0; j < NNODES; j++) {
				bag[j] = NNODES - j - 1;
			}
			break;
		default:
			/* Random keys in [0, NNODES). */
			for (j = 0; j < NNODES; j++) {
				bag[j] = gen_rand64_range(sfmt, NNODES);
			}
		}

		/*
		 * We alternate test behavior with a period of 2 here, and a
		 * period of 5 down below, so there's no cycle in which certain
		 * combinations get omitted.
		 */
		summarize_always_returns_true = (i % 2 == 0);

		for (j = 1; j <= NNODES; j++) {
			/* Initialize tree and nodes. */
			tree_new(&tree);
			for (k = 0; k < j; k++) {
				nodes[k].magic = NODE_MAGIC;
				nodes[k].key = bag[k];
				nodes[k].specialness = gen_rand64_range(sfmt,
				    NNODES);
				nodes[k].mid_remove = false;
				nodes[k].allow_duplicates = false;
				nodes[k].summary_lchild = NULL;
				nodes[k].summary_rchild = NULL;
				nodes[k].summary_max_specialness = 0;
			}

			/* Insert nodes. */
			for (k = 0; k < j; k++) {
				tree_insert(&tree, &nodes[k]);

				/* Balance/summaries hold after each insert. */
				rbtn_black_height(node_t, link, &tree,
				    black_height);
				imbalances = tree_recurse(tree.rbt_root,
				    black_height, 0);
				expect_u_eq(imbalances, 0,
				    "Tree is unbalanced");

				expect_u_eq(tree_iterate(&tree), k+1,
				    "Unexpected node iteration count");
				expect_u_eq(tree_iterate_reverse(&tree), k+1,
				    "Unexpected node iteration count");

				expect_false(tree_empty(&tree),
				    "Tree should not be empty");
				expect_ptr_not_null(tree_first(&tree),
				    "Tree should not be empty");
				expect_ptr_not_null(tree_last(&tree),
				    "Tree should not be empty");

				/* Smoke-test the neighbor queries. */
				tree_next(&tree, &nodes[k]);
				tree_prev(&tree, &nodes[k]);
			}

			/* Remove nodes. */
			switch (i % 5) {
			case 0:
				/* In insertion order. */
				for (k = 0; k < j; k++) {
					node_remove(&tree, &nodes[k], j - k);
				}
				break;
			case 1:
				/* In reverse insertion order. */
				for (k = j; k > 0; k--) {
					node_remove(&tree, &nodes[k-1], k);
				}
				break;
			case 2: {
				/* While iterating forward. */
				node_t *start;
				unsigned nnodes = j;

				start = NULL;
				do {
					start = tree_iter(&tree, start,
					    remove_iterate_cb, (void *)&nnodes);
					nnodes--;
				} while (start != NULL);
				expect_u_eq(nnodes, 0,
				    "Removal terminated early");
				break;
			} case 3: {
				/* While iterating backward. */
				node_t *start;
				unsigned nnodes = j;

				start = NULL;
				do {
					start = tree_reverse_iter(&tree, start,
					    remove_reverse_iterate_cb,
					    (void *)&nnodes);
					nnodes--;
				} while (start != NULL);
				expect_u_eq(nnodes, 0,
				    "Removal terminated early");
				break;
			} case 4: {
				/* Bulk destruction. */
				unsigned nnodes = j;
				tree_destroy(&tree, destroy_cb, &nnodes);
				expect_u_eq(nnodes, 0,
				    "Destruction terminated early");
				break;
			} default:
				not_reached();
			}
		}
	}
	fini_gen_rand(sfmt);
}
TEST_END
478
/*
 * Check that the filtered emptiness/first/last queries at the given
 * specialness threshold return exactly the expected results.
 */
static void
expect_simple_consistency(tree_t *tree, uint64_t specialness,
    bool expected_empty, node_t *expected_first, node_t *expected_last) {
	bool empty;
	node_t *first;
	node_t *last;

	empty = tree_empty_filtered(tree, &specialness_filter_node,
	    &specialness_filter_subtree, &specialness);
	expect_b_eq(expected_empty, empty, "");

	first = tree_first_filtered(tree,
	    &specialness_filter_node, &specialness_filter_subtree,
	    (void *)&specialness);
	expect_ptr_eq(expected_first, first, "");

	last = tree_last_filtered(tree,
	    &specialness_filter_node, &specialness_filter_subtree,
	    (void *)&specialness);
	expect_ptr_eq(expected_last, last, "");
}
500
/*
 * Hand-crafted filtered-query scenarios: specialness of node i is
 * ffs_u(i), so (as the checks below show) a threshold of 1 selects even
 * keys, 2 selects multiples of 4, and 3 selects multiples of 8.
 */
TEST_BEGIN(test_rb_filter_simple) {
	enum {FILTER_NODES = 10};
	node_t nodes[FILTER_NODES];
	for (unsigned i = 0; i < FILTER_NODES; i++) {
		nodes[i].magic = NODE_MAGIC;
		nodes[i].key = i;
		if (i == 0) {
			nodes[i].specialness = 0;
		} else {
			nodes[i].specialness = ffs_u(i);
		}
		nodes[i].mid_remove = false;
		nodes[i].allow_duplicates = false;
		nodes[i].summary_lchild = NULL;
		nodes[i].summary_rchild = NULL;
		nodes[i].summary_max_specialness = 0;
	}

	summarize_always_returns_true = false;

	tree_t tree;
	tree_new(&tree);

	/* Should be empty */
	expect_simple_consistency(&tree, /* specialness */ 0, /* empty */ true,
	    /* first */ NULL, /* last */ NULL);

	/* Fill in just the odd nodes. */
	for (int i = 1; i < FILTER_NODES; i += 2) {
		tree_insert(&tree, &nodes[i]);
	}

	/* A search for an odd node should succeed. */
	expect_simple_consistency(&tree, /* specialness */ 0, /* empty */ false,
	    /* first */ &nodes[1], /* last */ &nodes[9]);

	/* But a search for an even one should fail. */
	expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ true,
	    /* first */ NULL, /* last */ NULL);

	/* Now we add an even. */
	tree_insert(&tree, &nodes[4]);
	expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
	    /* first */ &nodes[4], /* last */ &nodes[4]);

	/* A smaller even, and a larger even. */
	tree_insert(&tree, &nodes[2]);
	tree_insert(&tree, &nodes[8]);

	/*
	 * A first-search (resp. last-search) for an even should switch to the
	 * lower (higher) one, now that it's been added.
	 */
	expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
	    /* first */ &nodes[2], /* last */ &nodes[8]);

	/*
	 * If we remove 2, a first-search we should go back to 4, while a
	 * last-search should remain unchanged.
	 */
	tree_remove(&tree, &nodes[2]);
	expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
	    /* first */ &nodes[4], /* last */ &nodes[8]);

	/* Reinsert 2, then find it again. */
	tree_insert(&tree, &nodes[2]);
	expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
	    /* first */ &nodes[2], /* last */ &nodes[8]);

	/* Searching for a multiple of 4 should not have changed. */
	expect_simple_consistency(&tree, /* specialness */ 2, /* empty */ false,
	    /* first */ &nodes[4], /* last */ &nodes[8]);

	/* And a multiple of 8 */
	expect_simple_consistency(&tree, /* specialness */ 3, /* empty */ false,
	    /* first */ &nodes[8], /* last */ &nodes[8]);

	/* But not a multiple of 16 */
	expect_simple_consistency(&tree, /* specialness */ 4, /* empty */ true,
	    /* first */ NULL, /* last */ NULL);
}
TEST_END
583
/* Shared state for the filtered-iteration callback below. */
typedef struct iter_ctx_s iter_ctx_t;
struct iter_ctx_s {
	int ncalls;		/* Callback invocations so far. */
	node_t *last_node;	/* Previously visited node, for order checks. */

	int ncalls_max;		/* Terminate iteration after this many calls. */
	bool forward;		/* Expected iteration direction. */
};
592
/*
 * Filtered-iteration callback: only filter-passing nodes (specialness >=
 * 1) may be visited, in strictly monotone order per ctx->forward; stops
 * iteration after ncalls_max calls by returning the current node.
 */
static node_t *
tree_iterate_filtered_cb(tree_t *tree, node_t *node, void *arg) {
	iter_ctx_t *ctx = (iter_ctx_t *)arg;
	ctx->ncalls++;
	expect_u64_ge(node->specialness, 1,
	    "Should only invoke cb on nodes that pass the filter");
	if (ctx->last_node != NULL) {
		if (ctx->forward) {
			expect_d_lt(node_cmp(ctx->last_node, node), 0,
			    "Incorrect iteration order");
		} else {
			expect_d_gt(node_cmp(ctx->last_node, node), 0,
			    "Incorrect iteration order");
		}
	}
	ctx->last_node = node;
	if (ctx->ncalls == ctx->ncalls_max) {
		return node;
	}
	return NULL;
}
614
615static int
616qsort_node_cmp(const void *ap, const void *bp) {
617 node_t *a = *(node_t **)ap;
618 node_t *b = *(node_t **)bp;
619 return node_cmp(a, b);
620}
621
#define UPDATE_TEST_MAX 100
/*
 * Cross-checks every filtered tree operation (empty/first/last, next/prev,
 * search/nsearch/psearch from keys before/at/after each node, and forward
 * and reverse iteration with and without early stopping) against brute-force
 * recomputation from the raw node array.  The filter accepts nodes with
 * specialness >= 1.  O(nnodes^2) per call -- fine for test-sized inputs.
 */
static void
check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) {
	uint64_t specialness = 1;

	/* Brute-force recompute emptiness, first, and last under the filter. */
	bool empty;
	bool real_empty = true;
	node_t *first;
	node_t *real_first = NULL;
	node_t *last;
	node_t *real_last = NULL;
	for (int i = 0; i < nnodes; i++) {
		if (nodes[i].specialness >= specialness) {
			real_empty = false;
			if (real_first == NULL
			    || node_cmp(&nodes[i], real_first) < 0) {
				real_first = &nodes[i];
			}
			if (real_last == NULL
			    || node_cmp(&nodes[i], real_last) > 0) {
				real_last = &nodes[i];
			}
		}
	}

	empty = tree_empty_filtered(tree, &specialness_filter_node,
	    &specialness_filter_subtree, &specialness);
	expect_b_eq(real_empty, empty, "");

	first = tree_first_filtered(tree, &specialness_filter_node,
	    &specialness_filter_subtree, &specialness);
	expect_ptr_eq(real_first, first, "");

	last = tree_last_filtered(tree, &specialness_filter_node,
	    &specialness_filter_subtree, &specialness);
	expect_ptr_eq(real_last, last, "");

	/* Per-node neighbor and search checks. */
	for (int i = 0; i < nnodes; i++) {
		node_t *next_filtered;
		node_t *real_next_filtered = NULL;
		node_t *prev_filtered;
		node_t *real_prev_filtered = NULL;
		/* Brute-force filtered predecessor/successor of nodes[i]. */
		for (int j = 0; j < nnodes; j++) {
			if (nodes[j].specialness < specialness) {
				continue;
			}
			if (node_cmp(&nodes[j], &nodes[i]) < 0
			    && (real_prev_filtered == NULL
			    || node_cmp(&nodes[j], real_prev_filtered) > 0)) {
				real_prev_filtered = &nodes[j];
			}
			if (node_cmp(&nodes[j], &nodes[i]) > 0
			    && (real_next_filtered == NULL
			    || node_cmp(&nodes[j], real_next_filtered) < 0)) {
				real_next_filtered = &nodes[j];
			}
		}
		next_filtered = tree_next_filtered(tree, &nodes[i],
		    &specialness_filter_node, &specialness_filter_subtree,
		    &specialness);
		expect_ptr_eq(real_next_filtered, next_filtered, "");

		prev_filtered = tree_prev_filtered(tree, &nodes[i],
		    &specialness_filter_node, &specialness_filter_subtree,
		    &specialness);
		expect_ptr_eq(real_prev_filtered, prev_filtered, "");

		node_t *search_filtered;
		node_t *real_search_filtered;
		node_t *nsearch_filtered;
		node_t *real_nsearch_filtered;
		node_t *psearch_filtered;
		node_t *real_psearch_filtered;

		/*
		 * search, nsearch, psearch from a node before nodes[i] in the
		 * ordering.
		 */
		node_t before;
		before.magic = NODE_MAGIC;
		/*
		 * Keys are generated even (2 * rand), so key - 1 is assumed
		 * absent from the tree.
		 */
		before.key = nodes[i].key - 1;
		before.allow_duplicates = false;
		real_search_filtered = NULL;
		search_filtered = tree_search_filtered(tree, &before,
		    &specialness_filter_node, &specialness_filter_subtree,
		    &specialness);
		expect_ptr_eq(real_search_filtered, search_filtered, "");

		real_nsearch_filtered = (nodes[i].specialness >= specialness ?
		    &nodes[i] : real_next_filtered);
		nsearch_filtered = tree_nsearch_filtered(tree, &before,
		    &specialness_filter_node, &specialness_filter_subtree,
		    &specialness);
		expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");

		real_psearch_filtered = real_prev_filtered;
		psearch_filtered = tree_psearch_filtered(tree, &before,
		    &specialness_filter_node, &specialness_filter_subtree,
		    &specialness);
		expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");

		/* search, nsearch, psearch from nodes[i] */
		real_search_filtered = (nodes[i].specialness >= specialness ?
		    &nodes[i] : NULL);
		search_filtered = tree_search_filtered(tree, &nodes[i],
		    &specialness_filter_node, &specialness_filter_subtree,
		    &specialness);
		expect_ptr_eq(real_search_filtered, search_filtered, "");

		real_nsearch_filtered = (nodes[i].specialness >= specialness ?
		    &nodes[i] : real_next_filtered);
		nsearch_filtered = tree_nsearch_filtered(tree, &nodes[i],
		    &specialness_filter_node, &specialness_filter_subtree,
		    &specialness);
		expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");

		real_psearch_filtered = (nodes[i].specialness >= specialness ?
		    &nodes[i] : real_prev_filtered);
		psearch_filtered = tree_psearch_filtered(tree, &nodes[i],
		    &specialness_filter_node, &specialness_filter_subtree,
		    &specialness);
		expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");

		/*
		 * search, nsearch, psearch from a node equivalent to but
		 * distinct from nodes[i].
		 */
		node_t equiv;
		equiv.magic = NODE_MAGIC;
		equiv.key = nodes[i].key;
		equiv.allow_duplicates = true;
		real_search_filtered = (nodes[i].specialness >= specialness ?
		    &nodes[i] : NULL);
		search_filtered = tree_search_filtered(tree, &equiv,
		    &specialness_filter_node, &specialness_filter_subtree,
		    &specialness);
		expect_ptr_eq(real_search_filtered, search_filtered, "");

		real_nsearch_filtered = (nodes[i].specialness >= specialness ?
		    &nodes[i] : real_next_filtered);
		nsearch_filtered = tree_nsearch_filtered(tree, &equiv,
		    &specialness_filter_node, &specialness_filter_subtree,
		    &specialness);
		expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");

		real_psearch_filtered = (nodes[i].specialness >= specialness ?
		    &nodes[i] : real_prev_filtered);
		psearch_filtered = tree_psearch_filtered(tree, &equiv,
		    &specialness_filter_node, &specialness_filter_subtree,
		    &specialness);
		expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");

		/*
		 * search, nsearch, psearch from a node after nodes[i] in the
		 * ordering.
		 */
		node_t after;
		after.magic = NODE_MAGIC;
		after.key = nodes[i].key + 1;
		after.allow_duplicates = false;
		real_search_filtered = NULL;
		search_filtered = tree_search_filtered(tree, &after,
		    &specialness_filter_node, &specialness_filter_subtree,
		    &specialness);
		expect_ptr_eq(real_search_filtered, search_filtered, "");

		real_nsearch_filtered = real_next_filtered;
		nsearch_filtered = tree_nsearch_filtered(tree, &after,
		    &specialness_filter_node, &specialness_filter_subtree,
		    &specialness);
		expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");

		real_psearch_filtered = (nodes[i].specialness >= specialness ?
		    &nodes[i] : real_prev_filtered);
		psearch_filtered = tree_psearch_filtered(tree, &after,
		    &specialness_filter_node, &specialness_filter_subtree,
		    &specialness);
		expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");
	}

	/* Filtered iteration test setup. */
	int nspecial = 0;
	node_t *sorted_nodes[UPDATE_TEST_MAX];
	node_t *sorted_filtered_nodes[UPDATE_TEST_MAX];
	for (int i = 0; i < nnodes; i++) {
		sorted_nodes[i] = &nodes[i];
	}
	qsort(sorted_nodes, nnodes, sizeof(node_t *), &qsort_node_cmp);
	/*
	 * Assign each node its overall rank and its rank among filter-passing
	 * nodes; collect the filter-passing nodes in sorted order.
	 */
	for (int i = 0; i < nnodes; i++) {
		sorted_nodes[i]->rank = i;
		sorted_nodes[i]->filtered_rank = nspecial;
		if (sorted_nodes[i]->specialness >= 1) {
			sorted_filtered_nodes[nspecial] = sorted_nodes[i];
			nspecial++;
		}
	}

	node_t *iter_result;

	iter_ctx_t ctx;
	ctx.ncalls = 0;
	ctx.last_node = NULL;
	ctx.ncalls_max = INT_MAX;
	ctx.forward = true;

	/* Filtered forward iteration from the beginning. */
	iter_result = tree_iter_filtered(tree, NULL, &tree_iterate_filtered_cb,
	    &ctx, &specialness_filter_node, &specialness_filter_subtree,
	    &specialness);
	expect_ptr_null(iter_result, "");
	expect_d_eq(nspecial, ctx.ncalls, "");
	/* Filtered forward iteration from a starting point. */
	for (int i = 0; i < nnodes; i++) {
		ctx.ncalls = 0;
		ctx.last_node = NULL;
		iter_result = tree_iter_filtered(tree, &nodes[i],
		    &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
		    &specialness_filter_subtree, &specialness);
		expect_ptr_null(iter_result, "");
		expect_d_eq(nspecial - nodes[i].filtered_rank, ctx.ncalls, "");
	}
	/* Filtered forward iteration from the beginning, with stopping */
	for (int i = 0; i < nspecial; i++) {
		ctx.ncalls = 0;
		ctx.last_node = NULL;
		ctx.ncalls_max = i + 1;
		iter_result = tree_iter_filtered(tree, NULL,
		    &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
		    &specialness_filter_subtree, &specialness);
		expect_ptr_eq(sorted_filtered_nodes[i], iter_result, "");
		expect_d_eq(ctx.ncalls, i + 1, "");
	}
	/* Filtered forward iteration from a starting point, with stopping. */
	for (int i = 0; i < nnodes; i++) {
		for (int j = 0; j < nspecial - nodes[i].filtered_rank; j++) {
			ctx.ncalls = 0;
			ctx.last_node = NULL;
			ctx.ncalls_max = j + 1;
			iter_result = tree_iter_filtered(tree, &nodes[i],
			    &tree_iterate_filtered_cb, &ctx,
			    &specialness_filter_node,
			    &specialness_filter_subtree, &specialness);
			expect_d_eq(j + 1, ctx.ncalls, "");
			expect_ptr_eq(sorted_filtered_nodes[
			    nodes[i].filtered_rank + j], iter_result, "");
		}
	}

	/* Backwards iteration. */
	ctx.ncalls = 0;
	ctx.last_node = NULL;
	ctx.ncalls_max = INT_MAX;
	ctx.forward = false;

	/* Filtered backward iteration from the end. */
	iter_result = tree_reverse_iter_filtered(tree, NULL,
	    &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
	    &specialness_filter_subtree, &specialness);
	expect_ptr_null(iter_result, "");
	expect_d_eq(nspecial, ctx.ncalls, "");
	/* Filtered backward iteration from a starting point. */
	for (int i = 0; i < nnodes; i++) {
		ctx.ncalls = 0;
		ctx.last_node = NULL;
		iter_result = tree_reverse_iter_filtered(tree, &nodes[i],
		    &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
		    &specialness_filter_subtree, &specialness);
		expect_ptr_null(iter_result, "");
		/* Reverse iteration includes nodes[i] itself if it passes. */
		int surplus_rank = (nodes[i].specialness >= 1 ? 1 : 0);
		expect_d_eq(nodes[i].filtered_rank + surplus_rank, ctx.ncalls,
		    "");
	}
	/* Filtered backward iteration from the end, with stopping */
	for (int i = 0; i < nspecial; i++) {
		ctx.ncalls = 0;
		ctx.last_node = NULL;
		ctx.ncalls_max = i + 1;
		iter_result = tree_reverse_iter_filtered(tree, NULL,
		    &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
		    &specialness_filter_subtree, &specialness);
		expect_ptr_eq(sorted_filtered_nodes[nspecial - i - 1],
		    iter_result, "");
		expect_d_eq(ctx.ncalls, i + 1, "");
	}
	/* Filtered backward iteration from a starting point, with stopping. */
	for (int i = 0; i < nnodes; i++) {
		int surplus_rank = (nodes[i].specialness >= 1 ? 1 : 0);
		for (int j = 0; j < nodes[i].filtered_rank + surplus_rank;
		    j++) {
			ctx.ncalls = 0;
			ctx.last_node = NULL;
			ctx.ncalls_max = j + 1;
			iter_result = tree_reverse_iter_filtered(tree,
			    &nodes[i], &tree_iterate_filtered_cb, &ctx,
			    &specialness_filter_node,
			    &specialness_filter_subtree, &specialness);
			expect_d_eq(j + 1, ctx.ncalls, "");
			expect_ptr_eq(sorted_filtered_nodes[
			    nodes[i].filtered_rank - j - 1 + surplus_rank],
			    iter_result, "");
		}
	}
}
925
926static void
927do_update_search_test(int nnodes, int ntrees, int nremovals,
928 int nupdates) {
929 node_t nodes[UPDATE_TEST_MAX];
930 assert(nnodes <= UPDATE_TEST_MAX);
931
932 sfmt_t *sfmt = init_gen_rand(12345);
933 for (int i = 0; i < ntrees; i++) {
934 tree_t tree;
935 tree_new(&tree);
936 for (int j = 0; j < nnodes; j++) {
937 nodes[j].magic = NODE_MAGIC;
938 /*
939 * In consistency checking, we increment or decrement a
940 * key and assume that the result is not a key in the
941 * tree. This isn't a *real* concern with 64-bit keys
942 * and a good PRNG, but why not be correct anyways?
943 */
944 nodes[j].key = 2 * gen_rand64(sfmt);
945 nodes[j].specialness = 0;
946 nodes[j].mid_remove = false;
947 nodes[j].allow_duplicates = false;
948 nodes[j].summary_lchild = NULL;
949 nodes[j].summary_rchild = NULL;
950 nodes[j].summary_max_specialness = 0;
951 tree_insert(&tree, &nodes[j]);
952 }
953 for (int j = 0; j < nremovals; j++) {
954 int victim = (int)gen_rand64_range(sfmt, nnodes);
955 if (!nodes[victim].mid_remove) {
956 tree_remove(&tree, &nodes[victim]);
957 nodes[victim].mid_remove = true;
958 }
959 }
960 for (int j = 0; j < nnodes; j++) {
961 if (nodes[j].mid_remove) {
962 nodes[j].mid_remove = false;
963 nodes[j].key = 2 * gen_rand64(sfmt);
964 tree_insert(&tree, &nodes[j]);
965 }
966 }
967 for (int j = 0; j < nupdates; j++) {
968 uint32_t ind = gen_rand32_range(sfmt, nnodes);
969 nodes[ind].specialness = 1 - nodes[ind].specialness;
970 tree_update_summaries(&tree, &nodes[ind]);
971 check_consistency(&tree, nodes, nnodes);
972 }
973 }
974}
975
/*
 * Exercises summary updates and filtered searches across several tree
 * sizes, with the real (non-trivial) summarization behavior enabled.
 */
TEST_BEGIN(test_rb_update_search) {
	summarize_always_returns_true = false;
	/* Args: nnodes, ntrees, nremovals, nupdates. */
	do_update_search_test(2, 100, 3, 50);
	do_update_search_test(5, 100, 3, 50);
	do_update_search_test(12, 100, 5, 1000);
	do_update_search_test(100, 1, 50, 500);
}
TEST_END
984
/* A plain (unsummarized) red-black tree over node_t, generated by rb_gen. */
typedef rb_tree(node_t) unsummarized_tree_t;
rb_gen(static UNUSED, unsummarized_tree_, unsummarized_tree_t, node_t, link,
    node_cmp);
988
989static node_t *
990unsummarized_tree_iterate_cb(unsummarized_tree_t *tree, node_t *node,
991 void *data) {
992 unsigned *i = (unsigned *)data;
993 (*i)++;
994 return NULL;
995}
/*
 * The unsummarized and summarized functionality is implemented via the same
 * functions; we don't really need to do much more than test that we can
 * exclude the filtered functionality without anything breaking.
 */
/* Iterating an empty unsummarized tree should invoke the callback 0 times. */
TEST_BEGIN(test_rb_unsummarized) {
	unsummarized_tree_t tree;
	unsummarized_tree_new(&tree);
	unsigned nnodes = 0;
	unsummarized_tree_iter(&tree, NULL, &unsummarized_tree_iterate_cb,
	    &nnodes);
	expect_u_eq(0, nnodes, "");
}
TEST_END
1010
int
main(void) {
	/* No-reentrancy harness: these tests must not allocate recursively. */
	return test_no_reentrancy(
	    test_rb_empty,
	    test_rb_random,
	    test_rb_filter_simple,
	    test_rb_update_search,
	    test_rb_unsummarized);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/retained.c b/examples/redis-unstable/deps/jemalloc/test/unit/retained.c
deleted file mode 100644
index aa9f684..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/retained.c
+++ /dev/null
@@ -1,188 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/san.h"
4#include "jemalloc/internal/spin.h"
5
/* Arena shared by all worker threads; destroyed and recreated per epoch. */
static unsigned arena_ind;
/* Per-allocation request size, and the expected extent size it maps to. */
static size_t sz;
static size_t esz;
#define NEPOCHS 8
#define PER_THD_NALLOCS 1
/* Current epoch; the main thread advances it to release the workers. */
static atomic_u_t epoch;
/* Count of workers that have completed the current epoch's allocations. */
static atomic_u_t nfinished;
13
/*
 * Creates a fresh arena via the "arenas.create" mallctl, optionally
 * installing the custom extent hooks h; returns the new arena's index.
 */
static unsigned
do_arena_create(extent_hooks_t *h) {
	unsigned new_arena_ind;
	size_t ind_sz = sizeof(unsigned);
	/* When hooks are supplied, newp points at the hooks pointer (&h). */
	expect_d_eq(mallctl("arenas.create", (void *)&new_arena_ind, &ind_sz,
	    (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
	    "Unexpected mallctl() failure");
	return new_arena_ind;
}
23
/* Destroys arena `ind` through the "arena.<i>.destroy" mallctl MIB. */
static void
do_arena_destroy(unsigned ind) {
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);

	/* Resolve the name once, then patch in the target arena index. */
	expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}
36
37static void
38do_refresh(void) {
39 uint64_t refresh_epoch = 1;
40 expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&refresh_epoch,
41 sizeof(refresh_epoch)), 0, "Unexpected mallctl() failure");
42}
43
/*
 * Reads a per-arena size_t statistic: resolves the 4-component mallctl name
 * cmd (e.g. "stats.arenas.0.pactive"), substitutes the arena index into
 * position 2 of the MIB, and returns the fetched value.
 */
static size_t
do_get_size_impl(const char *cmd, unsigned ind) {
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t z = sizeof(size_t);

	expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
	mib[2] = ind;
	size_t size;
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
	    0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd);

	return size;
}
59
60static size_t
61do_get_active(unsigned ind) {
62 return do_get_size_impl("stats.arenas.0.pactive", ind) * PAGE;
63}
64
/* Returns stats.arenas.<ind>.mapped (bytes). */
static size_t
do_get_mapped(unsigned ind) {
	size_t mapped_bytes = do_get_size_impl("stats.arenas.0.mapped", ind);
	return mapped_bytes;
}
69
/*
 * Worker thread body: for each epoch, spin until the main thread publishes
 * that epoch number, perform PER_THD_NALLOCS untcached allocations from the
 * shared arena, and signal completion via nfinished.  The main thread is
 * responsible for reclaiming the memory (by destroying the arena).
 */
static void *
thd_start(void *arg) {
	for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) {
		/* Busy-wait for next epoch. */
		unsigned cur_epoch;
		spin_t spinner = SPIN_INITIALIZER;
		/* Acquire load pairs with the main thread's release store. */
		while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) !=
		    next_epoch) {
			spin_adaptive(&spinner);
		}
		expect_u_eq(cur_epoch, next_epoch, "Unexpected epoch");

		/*
		 * Allocate. The main thread will reset the arena, so there's
		 * no need to deallocate.
		 */
		for (unsigned i = 0; i < PER_THD_NALLOCS; i++) {
			void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE
			    );
			expect_ptr_not_null(p,
			    "Unexpected mallocx() failure\n");
		}

		/* Let the main thread know we've finished this iteration. */
		atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE);
	}

	return NULL;
}
100
/*
 * Checks that virtual memory retained after arena growth stays within the
 * bound implied by the exponential extent-growth policy: across NEPOCHS
 * rounds of concurrent huge allocations, retained memory must not exceed
 * the sum of size classes needed to satisfy the requests (modulo per-growth
 * fragmentation).
 */
TEST_BEGIN(test_retained) {
	test_skip_if(!config_stats);
	test_skip_if(opt_hpa);

	arena_ind = do_arena_create(NULL);
	sz = nallocx(HUGEPAGE, 0);
	size_t guard_sz = san_guard_enabled() ? SAN_PAGE_GUARDS_SIZE : 0;
	esz = sz + sz_large_pad + guard_sz;

	atomic_store_u(&epoch, 0, ATOMIC_RELAXED);

	unsigned nthreads = ncpus * 2;
	if (LG_SIZEOF_PTR < 3 && nthreads > 16) {
		nthreads = 16; /* 32-bit platform could run out of vaddr. */
	}
	VARIABLE_ARRAY(thd_t, threads, nthreads);
	for (unsigned i = 0; i < nthreads; i++) {
		thd_create(&threads[i], thd_start, NULL);
	}

	for (unsigned e = 1; e < NEPOCHS; e++) {
		/* Release the workers for this epoch (release-store). */
		atomic_store_u(&nfinished, 0, ATOMIC_RELEASE);
		atomic_store_u(&epoch, e, ATOMIC_RELEASE);

		/* Wait for threads to finish allocating. */
		spin_t spinner = SPIN_INITIALIZER;
		while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) {
			spin_adaptive(&spinner);
		}

		/*
		 * Assert that retained is no more than the sum of size classes
		 * that should have been used to satisfy the worker threads'
		 * requests, discounting per growth fragmentation.
		 */
		do_refresh();

		size_t allocated = (esz - guard_sz) * nthreads *
		    PER_THD_NALLOCS;
		size_t active = do_get_active(arena_ind);
		expect_zu_le(allocated, active, "Unexpected active memory");
		size_t mapped = do_get_mapped(arena_ind);
		expect_zu_le(active, mapped, "Unexpected mapped memory");

		arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false);
		size_t usable = 0;
		size_t fragmented = 0;
		/*
		 * Walk the size classes the exponential growth policy has
		 * reached so far and bound their usable contribution.
		 */
		for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
		    arena->pa_shard.pac.exp_grow.next; pind++) {
			size_t psz = sz_pind2sz(pind);
			size_t psz_fragmented = psz % esz;
			size_t psz_usable = psz - psz_fragmented;
			/*
			 * Only consider size classes that wouldn't be skipped.
			 */
			if (psz_usable > 0) {
				expect_zu_lt(usable, allocated,
				    "Excessive retained memory "
				    "(%#zx[+%#zx] > %#zx)", usable, psz_usable,
				    allocated);
				fragmented += psz_fragmented;
				usable += psz_usable;
			}
		}

		/*
		 * Clean up arena. Destroying and recreating the arena
		 * is simpler that specifying extent hooks that deallocate
		 * (rather than retaining) during reset.
		 */
		do_arena_destroy(arena_ind);
		expect_u_eq(do_arena_create(NULL), arena_ind,
		    "Unexpected arena index");
	}

	for (unsigned i = 0; i < nthreads; i++) {
		thd_join(threads[i], NULL);
	}

	do_arena_destroy(arena_ind);
}
TEST_END
183
int
main(void) {
	/* Single multi-threaded test; standard harness. */
	return test(
	    test_retained);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/rtree.c b/examples/redis-unstable/deps/jemalloc/test/unit/rtree.c
deleted file mode 100644
index 4101b72..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/rtree.c
+++ /dev/null
@@ -1,289 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/rtree.h"
4
/* Sentinel arena index: all bits of the MALLOCX arena field set. */
#define INVALID_ARENA_IND ((1U << MALLOCX_ARENA_BITS) - 1)

/* Potentially too large to safely place on the stack. */
rtree_t test_rtree;
9
/* A dependent read from a freshly created (empty) rtree must fail. */
TEST_BEGIN(test_rtree_read_empty) {
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();

	/* The rtree allocates its nodes from this base. */
	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
	    /* metadata_use_hooks */ true);
	expect_ptr_not_null(base, "Unexpected base_new failure");

	rtree_t *rtree = &test_rtree;
	rtree_ctx_t rtree_ctx;
	rtree_ctx_data_init(&rtree_ctx);
	expect_false(rtree_new(rtree, base, false),
	    "Unexpected rtree_new() failure");
	rtree_contents_t contents;
	expect_true(rtree_read_independent(tsdn, rtree, &rtree_ctx, PAGE,
	    &contents), "rtree_read_independent() should fail on empty rtree.");

	base_delete(tsdn, base);
}
TEST_END
31
32#undef NTHREADS
33#undef NITERS
34#undef SEED
35
/*
 * Heap-allocates an edata_t with the required EDATA_ALIGNMENT; aborts the
 * test on allocation failure.  Callers intentionally never free these.
 */
static edata_t *
alloc_edata(void) {
	void *ret = mallocx(sizeof(edata_t), MALLOCX_ALIGN(EDATA_ALIGNMENT));
	assert_ptr_not_null(ret, "Unexpected mallocx() failure");

	return ret;
}
43
/*
 * Writes and reads back rtree entries at the extreme keys (lowest usable
 * page and the maximal address), verifying that all contents/metadata
 * fields round-trip.
 */
TEST_BEGIN(test_rtree_extrema) {
	edata_t *edata_a, *edata_b;
	edata_a = alloc_edata();
	edata_b = alloc_edata();
	edata_init(edata_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS,
	    false, sz_size2index(SC_LARGE_MINCLASS), 0,
	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
	edata_init(edata_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);

	tsdn_t *tsdn = tsdn_fetch();

	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
	    /* metadata_use_hooks */ true);
	expect_ptr_not_null(base, "Unexpected base_new failure");

	rtree_t *rtree = &test_rtree;
	rtree_ctx_t rtree_ctx;
	rtree_ctx_data_init(&rtree_ctx);
	expect_false(rtree_new(rtree, base, false),
	    "Unexpected rtree_new() failure");

	rtree_contents_t contents_a;
	contents_a.edata = edata_a;
	contents_a.metadata.szind = edata_szind_get(edata_a);
	contents_a.metadata.slab = edata_slab_get(edata_a);
	contents_a.metadata.is_head = edata_is_head_get(edata_a);
	contents_a.metadata.state = edata_state_get(edata_a);
	/* Writing the same entry twice must succeed (idempotent write). */
	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a),
	    "Unexpected rtree_write() failure");
	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a),
	    "Unexpected rtree_write() failure");
	rtree_contents_t read_contents_a = rtree_read(tsdn, rtree, &rtree_ctx,
	    PAGE);
	expect_true(contents_a.edata == read_contents_a.edata
	    && contents_a.metadata.szind == read_contents_a.metadata.szind
	    && contents_a.metadata.slab == read_contents_a.metadata.slab
	    && contents_a.metadata.is_head == read_contents_a.metadata.is_head
	    && contents_a.metadata.state == read_contents_a.metadata.state,
	    "rtree_read() should return previously set value");

	rtree_contents_t contents_b;
	contents_b.edata = edata_b;
	contents_b.metadata.szind = edata_szind_get_maybe_invalid(edata_b);
	contents_b.metadata.slab = edata_slab_get(edata_b);
	contents_b.metadata.is_head = edata_is_head_get(edata_b);
	contents_b.metadata.state = edata_state_get(edata_b);
	/* ~0 is the highest representable key. */
	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0),
	    contents_b), "Unexpected rtree_write() failure");
	rtree_contents_t read_contents_b = rtree_read(tsdn, rtree, &rtree_ctx,
	    ~((uintptr_t)0));
	assert_true(contents_b.edata == read_contents_b.edata
	    && contents_b.metadata.szind == read_contents_b.metadata.szind
	    && contents_b.metadata.slab == read_contents_b.metadata.slab
	    && contents_b.metadata.is_head == read_contents_b.metadata.is_head
	    && contents_b.metadata.state == read_contents_b.metadata.state,
	    "rtree_read() should return previously set value");

	base_delete(tsdn, base);
}
TEST_END
105
/*
 * Verifies that sub-page key bits are ignored: keys within the same page
 * map to the same leaf entry, while other pages remain unset.
 */
TEST_BEGIN(test_rtree_bits) {
	tsdn_t *tsdn = tsdn_fetch();
	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
	    /* metadata_use_hooks */ true);
	expect_ptr_not_null(base, "Unexpected base_new failure");

	/* Three keys inside the same page: start, start+1, last byte. */
	uintptr_t keys[] = {PAGE, PAGE + 1,
	    PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
	edata_t *edata_c = alloc_edata();
	edata_init(edata_c, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);

	rtree_t *rtree = &test_rtree;
	rtree_ctx_t rtree_ctx;
	rtree_ctx_data_init(&rtree_ctx);
	expect_false(rtree_new(rtree, base, false),
	    "Unexpected rtree_new() failure");

	for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
		rtree_contents_t contents;
		contents.edata = edata_c;
		contents.metadata.szind = SC_NSIZES;
		contents.metadata.slab = false;
		contents.metadata.is_head = false;
		contents.metadata.state = extent_state_active;

		expect_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
		    contents), "Unexpected rtree_write() failure");
		/* Every key in the same page must read back the same edata. */
		for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
			expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
			    keys[j]).edata, edata_c,
			    "rtree_edata_read() should return previously set "
			    "value and ignore insignificant key bits; i=%u, "
			    "j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
			    j, keys[i], keys[j]);
		}
		expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
		    (((uintptr_t)2) << LG_PAGE)).edata,
		    "Only leftmost rtree leaf should be set; i=%u", i);
		rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
	}

	base_delete(tsdn, base);
}
TEST_END
151
/*
 * Writes NSET random keys via leaf-element lookup/write, verifies all are
 * readable, then clears them and verifies the reads now return NULL.
 */
TEST_BEGIN(test_rtree_random) {
#define NSET 16
#define SEED 42
	sfmt_t *sfmt = init_gen_rand(SEED);
	tsdn_t *tsdn = tsdn_fetch();

	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
	    /* metadata_use_hooks */ true);
	expect_ptr_not_null(base, "Unexpected base_new failure");

	uintptr_t keys[NSET];
	rtree_t *rtree = &test_rtree;
	rtree_ctx_t rtree_ctx;
	rtree_ctx_data_init(&rtree_ctx);

	edata_t *edata_d = alloc_edata();
	edata_init(edata_d, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);

	expect_false(rtree_new(rtree, base, false),
	    "Unexpected rtree_new() failure");

	for (unsigned i = 0; i < NSET; i++) {
		keys[i] = (uintptr_t)gen_rand64(sfmt);
		/* init_missing=true: the leaf is created on demand. */
		rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree,
		    &rtree_ctx, keys[i], false, true);
		expect_ptr_not_null(elm,
		    "Unexpected rtree_leaf_elm_lookup() failure");
		rtree_contents_t contents;
		contents.edata = edata_d;
		contents.metadata.szind = SC_NSIZES;
		contents.metadata.slab = false;
		contents.metadata.is_head = false;
		contents.metadata.state = edata_state_get(edata_d);
		rtree_leaf_elm_write(tsdn, rtree, elm, contents);
		expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
		    keys[i]).edata, edata_d,
		    "rtree_edata_read() should return previously set value");
	}
	/* All writes must remain visible after the full insertion pass. */
	for (unsigned i = 0; i < NSET; i++) {
		expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
		    keys[i]).edata, edata_d,
		    "rtree_edata_read() should return previously set value, "
		    "i=%u", i);
	}

	for (unsigned i = 0; i < NSET; i++) {
		rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
		expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
		    keys[i]).edata,
		    "rtree_edata_read() should return previously set value");
	}
	for (unsigned i = 0; i < NSET; i++) {
		expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
		    keys[i]).edata,
		    "rtree_edata_read() should return previously set value");
	}

	base_delete(tsdn, base);
	fini_gen_rand(sfmt);
#undef NSET
#undef SEED
}
TEST_END
216
/*
 * Exercises rtree_write_range/rtree_clear_range over [start, end]: after a
 * range write every page in the range reads back the edata; after a range
 * clear every leaf element exists but holds NULL.
 */
static void
test_rtree_range_write(tsdn_t *tsdn, rtree_t *rtree, uintptr_t start,
    uintptr_t end) {
	rtree_ctx_t rtree_ctx;
	rtree_ctx_data_init(&rtree_ctx);

	edata_t *edata_e = alloc_edata();
	edata_init(edata_e, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
	rtree_contents_t contents;
	contents.edata = edata_e;
	contents.metadata.szind = SC_NSIZES;
	contents.metadata.slab = false;
	contents.metadata.is_head = false;
	contents.metadata.state = extent_state_active;

	/* Pre-seed the endpoints with single-key writes. */
	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, start,
	    contents), "Unexpected rtree_write() failure");
	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, end,
	    contents), "Unexpected rtree_write() failure");

	rtree_write_range(tsdn, rtree, &rtree_ctx, start, end, contents);
	for (uintptr_t i = 0; i < ((end - start) >> LG_PAGE); i++) {
		expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
		    start + (i << LG_PAGE)).edata, edata_e,
		    "rtree_edata_read() should return previously set value");
	}
	rtree_clear_range(tsdn, rtree, &rtree_ctx, start, end);
	rtree_leaf_elm_t *elm;
	for (uintptr_t i = 0; i < ((end - start) >> LG_PAGE); i++) {
		/* Clearing must not tear down leaves, only their contents. */
		elm = rtree_leaf_elm_lookup(tsdn, rtree, &rtree_ctx,
		    start + (i << LG_PAGE), false, false);
		expect_ptr_not_null(elm, "Should have been initialized.");
		expect_ptr_null(rtree_leaf_elm_read(tsdn, rtree, elm,
		    false).edata, "Should have been cleared.");
	}
}
254
/*
 * Drives the range write/clear test twice: once fully inside one rtree
 * leaf node, and once across a leaf-node boundary.
 */
TEST_BEGIN(test_rtree_range) {
	tsdn_t *tsdn = tsdn_fetch();
	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
	    /* metadata_use_hooks */ true);
	expect_ptr_not_null(base, "Unexpected base_new failure");

	rtree_t *rtree = &test_rtree;
	expect_false(rtree_new(rtree, base, false),
	    "Unexpected rtree_new() failure");

	/* Not crossing rtree node boundary first. */
	uintptr_t start = ZU(1) << rtree_leaf_maskbits();
	uintptr_t end = start + (ZU(100) << LG_PAGE);
	test_rtree_range_write(tsdn, rtree, start, end);

	/* Crossing rtree node boundary. */
	start = (ZU(1) << rtree_leaf_maskbits()) - (ZU(10) << LG_PAGE);
	end = start + (ZU(100) << LG_PAGE);
	assert_ptr_ne((void *)rtree_leafkey(start), (void *)rtree_leafkey(end),
	    "The range should span across two rtree nodes");
	test_rtree_range_write(tsdn, rtree, start, end);

	base_delete(tsdn, base);
}
TEST_END
280
int
main(void) {
	/* Standard harness; tests share the file-scope test_rtree. */
	return test(
	    test_rtree_read_empty,
	    test_rtree_extrema,
	    test_rtree_bits,
	    test_rtree_random,
	    test_rtree_range);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/safety_check.c b/examples/redis-unstable/deps/jemalloc/test/unit/safety_check.c
deleted file mode 100644
index 8472667..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/safety_check.c
+++ /dev/null
@@ -1,163 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/safety_check.h"
4
5/*
6 * Note that we get called through safety_check.sh, which turns on sampling for
7 * everything.
8 */
9
10bool fake_abort_called;
11void fake_abort(const char *message) {
12 (void)message;
13 fake_abort_called = true;
14}
15
16static void
17buffer_overflow_write(char *ptr, size_t size) {
18 /* Avoid overflow warnings. */
19 volatile size_t idx = size;
20 ptr[idx] = 0;
21}
22
23TEST_BEGIN(test_malloc_free_overflow) {
24 test_skip_if(!config_prof);
25 test_skip_if(!config_opt_safety_checks);
26
27 safety_check_set_abort(&fake_abort);
28 /* Buffer overflow! */
29 char* ptr = malloc(128);
30 buffer_overflow_write(ptr, 128);
31 free(ptr);
32 safety_check_set_abort(NULL);
33
34 expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
35 fake_abort_called = false;
36}
37TEST_END
38
39TEST_BEGIN(test_mallocx_dallocx_overflow) {
40 test_skip_if(!config_prof);
41 test_skip_if(!config_opt_safety_checks);
42
43 safety_check_set_abort(&fake_abort);
44 /* Buffer overflow! */
45 char* ptr = mallocx(128, 0);
46 buffer_overflow_write(ptr, 128);
47 dallocx(ptr, 0);
48 safety_check_set_abort(NULL);
49
50 expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
51 fake_abort_called = false;
52}
53TEST_END
54
55TEST_BEGIN(test_malloc_sdallocx_overflow) {
56 test_skip_if(!config_prof);
57 test_skip_if(!config_opt_safety_checks);
58
59 safety_check_set_abort(&fake_abort);
60 /* Buffer overflow! */
61 char* ptr = malloc(128);
62 buffer_overflow_write(ptr, 128);
63 sdallocx(ptr, 128, 0);
64 safety_check_set_abort(NULL);
65
66 expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
67 fake_abort_called = false;
68}
69TEST_END
70
71TEST_BEGIN(test_realloc_overflow) {
72 test_skip_if(!config_prof);
73 test_skip_if(!config_opt_safety_checks);
74
75 safety_check_set_abort(&fake_abort);
76 /* Buffer overflow! */
77 char* ptr = malloc(128);
78 buffer_overflow_write(ptr, 128);
79 ptr = realloc(ptr, 129);
80 safety_check_set_abort(NULL);
81 free(ptr);
82
83 expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
84 fake_abort_called = false;
85}
86TEST_END
87
88TEST_BEGIN(test_rallocx_overflow) {
89 test_skip_if(!config_prof);
90 test_skip_if(!config_opt_safety_checks);
91
92 safety_check_set_abort(&fake_abort);
93 /* Buffer overflow! */
94 char* ptr = malloc(128);
95 buffer_overflow_write(ptr, 128);
96 ptr = rallocx(ptr, 129, 0);
97 safety_check_set_abort(NULL);
98 free(ptr);
99
100 expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
101 fake_abort_called = false;
102}
103TEST_END
104
105TEST_BEGIN(test_xallocx_overflow) {
106 test_skip_if(!config_prof);
107 test_skip_if(!config_opt_safety_checks);
108
109 safety_check_set_abort(&fake_abort);
110 /* Buffer overflow! */
111 char* ptr = malloc(128);
112 buffer_overflow_write(ptr, 128);
113 size_t result = xallocx(ptr, 129, 0, 0);
114 expect_zu_eq(result, 128, "");
115 free(ptr);
116 expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
117 fake_abort_called = false;
118 safety_check_set_abort(NULL);
119}
120TEST_END
121
122TEST_BEGIN(test_realloc_no_overflow) {
123 char* ptr = malloc(128);
124 ptr = realloc(ptr, 256);
125 ptr[128] = 0;
126 ptr[255] = 0;
127 free(ptr);
128
129 ptr = malloc(128);
130 ptr = realloc(ptr, 64);
131 ptr[63] = 0;
132 ptr[0] = 0;
133 free(ptr);
134}
135TEST_END
136
137TEST_BEGIN(test_rallocx_no_overflow) {
138 char* ptr = malloc(128);
139 ptr = rallocx(ptr, 256, 0);
140 ptr[128] = 0;
141 ptr[255] = 0;
142 free(ptr);
143
144 ptr = malloc(128);
145 ptr = rallocx(ptr, 64, 0);
146 ptr[63] = 0;
147 ptr[0] = 0;
148 free(ptr);
149}
150TEST_END
151
152int
153main(void) {
154 return test(
155 test_malloc_free_overflow,
156 test_mallocx_dallocx_overflow,
157 test_malloc_sdallocx_overflow,
158 test_realloc_overflow,
159 test_rallocx_overflow,
160 test_xallocx_overflow,
161 test_realloc_no_overflow,
162 test_rallocx_no_overflow);
163}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/safety_check.sh b/examples/redis-unstable/deps/jemalloc/test/unit/safety_check.sh
deleted file mode 100644
index 485f9bf..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/safety_check.sh
+++ /dev/null
@@ -1,5 +0,0 @@
1#!/bin/sh
2
3if [ "x${enable_prof}" = "x1" ] ; then
4 export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
5fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/san.c b/examples/redis-unstable/deps/jemalloc/test/unit/san.c
deleted file mode 100644
index 5b98f52..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/san.c
+++ /dev/null
@@ -1,207 +0,0 @@
1#include "test/jemalloc_test.h"
2#include "test/arena_util.h"
3#include "test/san.h"
4
5#include "jemalloc/internal/san.h"
6
7static void
8verify_extent_guarded(tsdn_t *tsdn, void *ptr) {
9 expect_true(extent_is_guarded(tsdn, ptr),
10 "All extents should be guarded.");
11}
12
13#define MAX_SMALL_ALLOCATIONS 4096
14void *small_alloc[MAX_SMALL_ALLOCATIONS];
15
16/*
17 * This test allocates page sized slabs and checks that every two slabs have
18 * at least one page in between them. That page is supposed to be the guard
19 * page.
20 */
21TEST_BEGIN(test_guarded_small) {
22 test_skip_if(opt_prof);
23
24 tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
25 unsigned npages = 16, pages_found = 0, ends_found = 0;
26 VARIABLE_ARRAY(uintptr_t, pages, npages);
27
28 /* Allocate to get sanitized pointers. */
29 size_t slab_sz = PAGE;
30 size_t sz = slab_sz / 8;
31 unsigned n_alloc = 0;
32 while (n_alloc < MAX_SMALL_ALLOCATIONS) {
33 void *ptr = malloc(sz);
34 expect_ptr_not_null(ptr, "Unexpected malloc() failure");
35 small_alloc[n_alloc] = ptr;
36 verify_extent_guarded(tsdn, ptr);
37 if ((uintptr_t)ptr % PAGE == 0) {
38 assert_u_lt(pages_found, npages,
39 "Unexpectedly large number of page aligned allocs");
40 pages[pages_found++] = (uintptr_t)ptr;
41 }
42 if (((uintptr_t)ptr + (uintptr_t)sz) % PAGE == 0) {
43 ends_found++;
44 }
45 n_alloc++;
46 if (pages_found == npages && ends_found == npages) {
47 break;
48 }
49 }
50 /* Should found the ptrs being checked for overflow and underflow. */
51 expect_u_eq(pages_found, npages, "Could not found the expected pages.");
52 expect_u_eq(ends_found, npages, "Could not found the expected pages.");
53
54 /* Verify the pages are not continuous, i.e. separated by guards. */
55 for (unsigned i = 0; i < npages - 1; i++) {
56 for (unsigned j = i + 1; j < npages; j++) {
57 uintptr_t ptr_diff = pages[i] > pages[j] ?
58 pages[i] - pages[j] : pages[j] - pages[i];
59 expect_zu_ge((size_t)ptr_diff, slab_sz + PAGE,
60 "There should be at least one pages between "
61 "guarded slabs");
62 }
63 }
64
65 for (unsigned i = 0; i < n_alloc + 1; i++) {
66 free(small_alloc[i]);
67 }
68}
69TEST_END
70
71TEST_BEGIN(test_guarded_large) {
72 tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
73 unsigned nlarge = 32;
74 VARIABLE_ARRAY(uintptr_t, large, nlarge);
75
76 /* Allocate to get sanitized pointers. */
77 size_t large_sz = SC_LARGE_MINCLASS;
78 for (unsigned i = 0; i < nlarge; i++) {
79 void *ptr = malloc(large_sz);
80 verify_extent_guarded(tsdn, ptr);
81 expect_ptr_not_null(ptr, "Unexpected malloc() failure");
82 large[i] = (uintptr_t)ptr;
83 }
84
85 /* Verify the pages are not continuous, i.e. separated by guards. */
86 for (unsigned i = 0; i < nlarge; i++) {
87 for (unsigned j = i + 1; j < nlarge; j++) {
88 uintptr_t ptr_diff = large[i] > large[j] ?
89 large[i] - large[j] : large[j] - large[i];
90 expect_zu_ge((size_t)ptr_diff, large_sz + 2 * PAGE,
91 "There should be at least two pages between "
92 " guarded large allocations");
93 }
94 }
95
96 for (unsigned i = 0; i < nlarge; i++) {
97 free((void *)large[i]);
98 }
99}
100TEST_END
101
102static void
103verify_pdirty(unsigned arena_ind, uint64_t expected) {
104 uint64_t pdirty = get_arena_pdirty(arena_ind);
105 expect_u64_eq(pdirty, expected / PAGE,
106 "Unexpected dirty page amount.");
107}
108
109static void
110verify_pmuzzy(unsigned arena_ind, uint64_t expected) {
111 uint64_t pmuzzy = get_arena_pmuzzy(arena_ind);
112 expect_u64_eq(pmuzzy, expected / PAGE,
113 "Unexpected muzzy page amount.");
114}
115
116TEST_BEGIN(test_guarded_decay) {
117 unsigned arena_ind = do_arena_create(-1, -1);
118 do_decay(arena_ind);
119 do_purge(arena_ind);
120
121 verify_pdirty(arena_ind, 0);
122 verify_pmuzzy(arena_ind, 0);
123
124 /* Verify that guarded extents as dirty. */
125 size_t sz1 = PAGE, sz2 = PAGE * 2;
126 /* W/o maps_coalesce, guarded extents are unguarded eagerly. */
127 size_t add_guard_size = maps_coalesce ? 0 : SAN_PAGE_GUARDS_SIZE;
128 generate_dirty(arena_ind, sz1);
129 verify_pdirty(arena_ind, sz1 + add_guard_size);
130 verify_pmuzzy(arena_ind, 0);
131
132 /* Should reuse the first extent. */
133 generate_dirty(arena_ind, sz1);
134 verify_pdirty(arena_ind, sz1 + add_guard_size);
135 verify_pmuzzy(arena_ind, 0);
136
137 /* Should not reuse; expect new dirty pages. */
138 generate_dirty(arena_ind, sz2);
139 verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
140 verify_pmuzzy(arena_ind, 0);
141
142 tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
143 int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
144
145 /* Should reuse dirty extents for the two mallocx. */
146 void *p1 = do_mallocx(sz1, flags);
147 verify_extent_guarded(tsdn, p1);
148 verify_pdirty(arena_ind, sz2 + add_guard_size);
149
150 void *p2 = do_mallocx(sz2, flags);
151 verify_extent_guarded(tsdn, p2);
152 verify_pdirty(arena_ind, 0);
153 verify_pmuzzy(arena_ind, 0);
154
155 dallocx(p1, flags);
156 verify_pdirty(arena_ind, sz1 + add_guard_size);
157 dallocx(p2, flags);
158 verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
159 verify_pmuzzy(arena_ind, 0);
160
161 do_purge(arena_ind);
162 verify_pdirty(arena_ind, 0);
163 verify_pmuzzy(arena_ind, 0);
164
165 if (config_stats) {
166 expect_u64_eq(get_arena_npurge(arena_ind), 1,
167 "Expected purging to occur");
168 expect_u64_eq(get_arena_dirty_npurge(arena_ind), 1,
169 "Expected purging to occur");
170 expect_u64_eq(get_arena_dirty_purged(arena_ind),
171 (sz1 + sz2 + 2 * add_guard_size) / PAGE,
172 "Expected purging to occur");
173 expect_u64_eq(get_arena_muzzy_npurge(arena_ind), 0,
174 "Expected purging to occur");
175 }
176
177 if (opt_retain) {
178 /*
179 * With retain, guarded extents are not mergable and will be
180 * cached in ecache_retained. They should be reused.
181 */
182 void *new_p1 = do_mallocx(sz1, flags);
183 verify_extent_guarded(tsdn, p1);
184 expect_ptr_eq(p1, new_p1, "Expect to reuse p1");
185
186 void *new_p2 = do_mallocx(sz2, flags);
187 verify_extent_guarded(tsdn, p2);
188 expect_ptr_eq(p2, new_p2, "Expect to reuse p2");
189
190 dallocx(new_p1, flags);
191 verify_pdirty(arena_ind, sz1 + add_guard_size);
192 dallocx(new_p2, flags);
193 verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
194 verify_pmuzzy(arena_ind, 0);
195 }
196
197 do_arena_destroy(arena_ind);
198}
199TEST_END
200
201int
202main(void) {
203 return test(
204 test_guarded_small,
205 test_guarded_large,
206 test_guarded_decay);
207}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/san.sh b/examples/redis-unstable/deps/jemalloc/test/unit/san.sh
deleted file mode 100644
index 933b4a4..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/san.sh
+++ /dev/null
@@ -1,3 +0,0 @@
1#!/bin/sh
2
3export MALLOC_CONF="san_guard_large:1,san_guard_small:1"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/san_bump.c b/examples/redis-unstable/deps/jemalloc/test/unit/san_bump.c
deleted file mode 100644
index cafa37f..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/san_bump.c
+++ /dev/null
@@ -1,111 +0,0 @@
1#include "test/jemalloc_test.h"
2#include "test/arena_util.h"
3
4#include "jemalloc/internal/arena_structs.h"
5#include "jemalloc/internal/san_bump.h"
6
7TEST_BEGIN(test_san_bump_alloc) {
8 test_skip_if(!maps_coalesce || !opt_retain);
9
10 tsdn_t *tsdn = tsdn_fetch();
11
12 san_bump_alloc_t sba;
13 san_bump_alloc_init(&sba);
14
15 unsigned arena_ind = do_arena_create(0, 0);
16 assert_u_ne(arena_ind, UINT_MAX, "Failed to create an arena");
17
18 arena_t *arena = arena_get(tsdn, arena_ind, false);
19 pac_t *pac = &arena->pa_shard.pac;
20
21 size_t alloc_size = PAGE * 16;
22 size_t alloc_n = alloc_size / sizeof(unsigned);
23 edata_t* edata = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
24 alloc_size, /* zero */ false);
25
26 expect_ptr_not_null(edata, "Failed to allocate edata");
27 expect_u_eq(edata_arena_ind_get(edata), arena_ind,
28 "Edata was assigned an incorrect arena id");
29 expect_zu_eq(edata_size_get(edata), alloc_size,
30 "Allocated edata of incorrect size");
31 expect_false(edata_slab_get(edata),
32 "Bump allocator incorrectly assigned 'slab' to true");
33 expect_true(edata_committed_get(edata), "Edata is not committed");
34
35 void *ptr = edata_addr_get(edata);
36 expect_ptr_not_null(ptr, "Edata was assigned an invalid address");
37 /* Test that memory is allocated; no guard pages are misplaced */
38 for (unsigned i = 0; i < alloc_n; ++i) {
39 ((unsigned *)ptr)[i] = 1;
40 }
41
42 size_t alloc_size2 = PAGE * 28;
43 size_t alloc_n2 = alloc_size / sizeof(unsigned);
44 edata_t *edata2 = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
45 alloc_size2, /* zero */ true);
46
47 expect_ptr_not_null(edata2, "Failed to allocate edata");
48 expect_u_eq(edata_arena_ind_get(edata2), arena_ind,
49 "Edata was assigned an incorrect arena id");
50 expect_zu_eq(edata_size_get(edata2), alloc_size2,
51 "Allocated edata of incorrect size");
52 expect_false(edata_slab_get(edata2),
53 "Bump allocator incorrectly assigned 'slab' to true");
54 expect_true(edata_committed_get(edata2), "Edata is not committed");
55
56 void *ptr2 = edata_addr_get(edata2);
57 expect_ptr_not_null(ptr, "Edata was assigned an invalid address");
58
59 uintptr_t ptrdiff = ptr2 > ptr ? (uintptr_t)ptr2 - (uintptr_t)ptr
60 : (uintptr_t)ptr - (uintptr_t)ptr2;
61 size_t between_allocs = (size_t)ptrdiff - alloc_size;
62
63 expect_zu_ge(between_allocs, PAGE,
64 "Guard page between allocs is missing");
65
66 for (unsigned i = 0; i < alloc_n2; ++i) {
67 expect_u_eq(((unsigned *)ptr2)[i], 0, "Memory is not zeroed");
68 }
69}
70TEST_END
71
72TEST_BEGIN(test_large_alloc_size) {
73 test_skip_if(!maps_coalesce || !opt_retain);
74
75 tsdn_t *tsdn = tsdn_fetch();
76
77 san_bump_alloc_t sba;
78 san_bump_alloc_init(&sba);
79
80 unsigned arena_ind = do_arena_create(0, 0);
81 assert_u_ne(arena_ind, UINT_MAX, "Failed to create an arena");
82
83 arena_t *arena = arena_get(tsdn, arena_ind, false);
84 pac_t *pac = &arena->pa_shard.pac;
85
86 size_t alloc_size = SBA_RETAINED_ALLOC_SIZE * 2;
87 edata_t* edata = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
88 alloc_size, /* zero */ false);
89 expect_u_eq(edata_arena_ind_get(edata), arena_ind,
90 "Edata was assigned an incorrect arena id");
91 expect_zu_eq(edata_size_get(edata), alloc_size,
92 "Allocated edata of incorrect size");
93 expect_false(edata_slab_get(edata),
94 "Bump allocator incorrectly assigned 'slab' to true");
95 expect_true(edata_committed_get(edata), "Edata is not committed");
96
97 void *ptr = edata_addr_get(edata);
98 expect_ptr_not_null(ptr, "Edata was assigned an invalid address");
99 /* Test that memory is allocated; no guard pages are misplaced */
100 for (unsigned i = 0; i < alloc_size / PAGE; ++i) {
101 *((char *)ptr + PAGE * i) = 1;
102 }
103}
104TEST_END
105
106int
107main(void) {
108 return test(
109 test_san_bump_alloc,
110 test_large_alloc_size);
111}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/sc.c b/examples/redis-unstable/deps/jemalloc/test/unit/sc.c
deleted file mode 100644
index d207481..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/sc.c
+++ /dev/null
@@ -1,33 +0,0 @@
1#include "test/jemalloc_test.h"
2
3TEST_BEGIN(test_update_slab_size) {
4 sc_data_t data;
5 memset(&data, 0, sizeof(data));
6 sc_data_init(&data);
7 sc_t *tiny = &data.sc[0];
8 size_t tiny_size = (ZU(1) << tiny->lg_base)
9 + (ZU(tiny->ndelta) << tiny->lg_delta);
10 size_t pgs_too_big = (tiny_size * BITMAP_MAXBITS + PAGE - 1) / PAGE + 1;
11 sc_data_update_slab_size(&data, tiny_size, tiny_size, (int)pgs_too_big);
12 expect_zu_lt((size_t)tiny->pgs, pgs_too_big, "Allowed excessive pages");
13
14 sc_data_update_slab_size(&data, 1, 10 * PAGE, 1);
15 for (int i = 0; i < data.nbins; i++) {
16 sc_t *sc = &data.sc[i];
17 size_t reg_size = (ZU(1) << sc->lg_base)
18 + (ZU(sc->ndelta) << sc->lg_delta);
19 if (reg_size <= PAGE) {
20 expect_d_eq(sc->pgs, 1, "Ignored valid page size hint");
21 } else {
22 expect_d_gt(sc->pgs, 1,
23 "Allowed invalid page size hint");
24 }
25 }
26}
27TEST_END
28
29int
30main(void) {
31 return test(
32 test_update_slab_size);
33}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/sec.c b/examples/redis-unstable/deps/jemalloc/test/unit/sec.c
deleted file mode 100644
index f3ec403..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/sec.c
+++ /dev/null
@@ -1,634 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/sec.h"
4
5typedef struct pai_test_allocator_s pai_test_allocator_t;
6struct pai_test_allocator_s {
7 pai_t pai;
8 bool alloc_fail;
9 size_t alloc_count;
10 size_t alloc_batch_count;
11 size_t dalloc_count;
12 size_t dalloc_batch_count;
13 /*
14 * We use a simple bump allocator as the implementation. This isn't
15 * *really* correct, since we may allow expansion into a subsequent
16 * allocation, but it's not like the SEC is really examining the
17 * pointers it gets back; this is mostly just helpful for debugging.
18 */
19 uintptr_t next_ptr;
20 size_t expand_count;
21 bool expand_return_value;
22 size_t shrink_count;
23 bool shrink_return_value;
24};
25
26static void
27test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,
28 size_t max_bytes) {
29 sec_opts_t opts;
30 opts.nshards = 1;
31 opts.max_alloc = max_alloc;
32 opts.max_bytes = max_bytes;
33 /*
34 * Just choose reasonable defaults for these; most tests don't care so
35 * long as they're something reasonable.
36 */
37 opts.bytes_after_flush = max_bytes / 2;
38 opts.batch_fill_extra = 4;
39
40 /*
41 * We end up leaking this base, but that's fine; this test is
42 * short-running, and SECs are arena-scoped in reality.
43 */
44 base_t *base = base_new(TSDN_NULL, /* ind */ 123,
45 &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
46
47 bool err = sec_init(TSDN_NULL, sec, base, fallback, &opts);
48 assert_false(err, "Unexpected initialization failure");
49 assert_u_ge(sec->npsizes, 0, "Zero size classes allowed for caching");
50}
51
52static inline edata_t *
53pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
54 size_t alignment, bool zero, bool guarded, bool frequent_reuse,
55 bool *deferred_work_generated) {
56 assert(!guarded);
57 pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
58 if (ta->alloc_fail) {
59 return NULL;
60 }
61 edata_t *edata = malloc(sizeof(edata_t));
62 assert_ptr_not_null(edata, "");
63 ta->next_ptr += alignment - 1;
64 edata_init(edata, /* arena_ind */ 0,
65 (void *)(ta->next_ptr & ~(alignment - 1)), size,
66 /* slab */ false,
67 /* szind */ 0, /* sn */ 1, extent_state_active, /* zero */ zero,
68 /* comitted */ true, /* ranged */ false, EXTENT_NOT_HEAD);
69 ta->next_ptr += size;
70 ta->alloc_count++;
71 return edata;
72}
73
74static inline size_t
75pai_test_allocator_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
76 size_t nallocs, edata_list_active_t *results,
77 bool *deferred_work_generated) {
78 pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
79 if (ta->alloc_fail) {
80 return 0;
81 }
82 for (size_t i = 0; i < nallocs; i++) {
83 edata_t *edata = malloc(sizeof(edata_t));
84 assert_ptr_not_null(edata, "");
85 edata_init(edata, /* arena_ind */ 0,
86 (void *)ta->next_ptr, size,
87 /* slab */ false, /* szind */ 0, /* sn */ 1,
88 extent_state_active, /* zero */ false, /* comitted */ true,
89 /* ranged */ false, EXTENT_NOT_HEAD);
90 ta->next_ptr += size;
91 ta->alloc_batch_count++;
92 edata_list_active_append(results, edata);
93 }
94 return nallocs;
95}
96
97static bool
98pai_test_allocator_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
99 size_t old_size, size_t new_size, bool zero,
100 bool *deferred_work_generated) {
101 pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
102 ta->expand_count++;
103 return ta->expand_return_value;
104}
105
106static bool
107pai_test_allocator_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
108 size_t old_size, size_t new_size, bool *deferred_work_generated) {
109 pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
110 ta->shrink_count++;
111 return ta->shrink_return_value;
112}
113
114static void
115pai_test_allocator_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
116 bool *deferred_work_generated) {
117 pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
118 ta->dalloc_count++;
119 free(edata);
120}
121
122static void
123pai_test_allocator_dalloc_batch(tsdn_t *tsdn, pai_t *self,
124 edata_list_active_t *list, bool *deferred_work_generated) {
125 pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
126
127 edata_t *edata;
128 while ((edata = edata_list_active_first(list)) != NULL) {
129 edata_list_active_remove(list, edata);
130 ta->dalloc_batch_count++;
131 free(edata);
132 }
133}
134
135static inline void
136pai_test_allocator_init(pai_test_allocator_t *ta) {
137 ta->alloc_fail = false;
138 ta->alloc_count = 0;
139 ta->alloc_batch_count = 0;
140 ta->dalloc_count = 0;
141 ta->dalloc_batch_count = 0;
142 /* Just don't start the edata at 0. */
143 ta->next_ptr = 10 * PAGE;
144 ta->expand_count = 0;
145 ta->expand_return_value = false;
146 ta->shrink_count = 0;
147 ta->shrink_return_value = false;
148 ta->pai.alloc = &pai_test_allocator_alloc;
149 ta->pai.alloc_batch = &pai_test_allocator_alloc_batch;
150 ta->pai.expand = &pai_test_allocator_expand;
151 ta->pai.shrink = &pai_test_allocator_shrink;
152 ta->pai.dalloc = &pai_test_allocator_dalloc;
153 ta->pai.dalloc_batch = &pai_test_allocator_dalloc_batch;
154}
155
156TEST_BEGIN(test_reuse) {
157 pai_test_allocator_t ta;
158 pai_test_allocator_init(&ta);
159 sec_t sec;
160 /*
161 * We can't use the "real" tsd, since we malloc within the test
162 * allocator hooks; we'd get lock inversion crashes. Eventually, we
163 * should have a way to mock tsds, but for now just don't do any
164 * lock-order checking.
165 */
166 tsdn_t *tsdn = TSDN_NULL;
167 /*
168 * 11 allocs apiece of 1-PAGE and 2-PAGE objects means that we should be
169 * able to get to 33 pages in the cache before triggering a flush. We
170 * set the flush liimt to twice this amount, to avoid accidentally
171 * triggering a flush caused by the batch-allocation down the cache fill
172 * pathway disrupting ordering.
173 */
174 enum { NALLOCS = 11 };
175 edata_t *one_page[NALLOCS];
176 edata_t *two_page[NALLOCS];
177 bool deferred_work_generated = false;
178 test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 2 * PAGE,
179 /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
180 for (int i = 0; i < NALLOCS; i++) {
181 one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
182 /* zero */ false, /* guarded */ false, /* frequent_reuse */
183 false, &deferred_work_generated);
184 expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
185 two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
186 /* zero */ false, /* guarded */ false, /* frequent_reuse */
187 false, &deferred_work_generated);
188 expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
189 }
190 expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
191 size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
192 expect_zu_le(2 * NALLOCS, max_allocs,
193 "Incorrect number of allocations");
194 expect_zu_eq(0, ta.dalloc_count,
195 "Incorrect number of allocations");
196 /*
197 * Free in a different order than we allocated, to make sure free-list
198 * separation works correctly.
199 */
200 for (int i = NALLOCS - 1; i >= 0; i--) {
201 pai_dalloc(tsdn, &sec.pai, one_page[i],
202 &deferred_work_generated);
203 }
204 for (int i = NALLOCS - 1; i >= 0; i--) {
205 pai_dalloc(tsdn, &sec.pai, two_page[i],
206 &deferred_work_generated);
207 }
208 expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
209 "Incorrect number of allocations");
210 expect_zu_eq(0, ta.dalloc_count,
211 "Incorrect number of allocations");
212 /*
213 * Check that the n'th most recent deallocated extent is returned for
214 * the n'th alloc request of a given size.
215 */
216 for (int i = 0; i < NALLOCS; i++) {
217 edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
218 /* zero */ false, /* guarded */ false, /* frequent_reuse */
219 false, &deferred_work_generated);
220 edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
221 /* zero */ false, /* guarded */ false, /* frequent_reuse */
222 false, &deferred_work_generated);
223 expect_ptr_eq(one_page[i], alloc1,
224 "Got unexpected allocation");
225 expect_ptr_eq(two_page[i], alloc2,
226 "Got unexpected allocation");
227 }
228 expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
229 "Incorrect number of allocations");
230 expect_zu_eq(0, ta.dalloc_count,
231 "Incorrect number of allocations");
232}
233TEST_END
234
235
236TEST_BEGIN(test_auto_flush) {
237 pai_test_allocator_t ta;
238 pai_test_allocator_init(&ta);
239 sec_t sec;
240 /* See the note above -- we can't use the real tsd. */
241 tsdn_t *tsdn = TSDN_NULL;
242 /*
243 * 10-allocs apiece of 1-PAGE and 2-PAGE objects means that we should be
244 * able to get to 30 pages in the cache before triggering a flush. The
245 * choice of NALLOCS here is chosen to match the batch allocation
246 * default (4 extra + 1 == 5; so 10 allocations leaves the cache exactly
247 * empty, even in the presence of batch allocation on fill).
248 * Eventually, once our allocation batching strategies become smarter,
249 * this should change.
250 */
251 enum { NALLOCS = 10 };
252 edata_t *extra_alloc;
253 edata_t *allocs[NALLOCS];
254 bool deferred_work_generated = false;
255 test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
256 /* max_bytes */ NALLOCS * PAGE);
257 for (int i = 0; i < NALLOCS; i++) {
258 allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
259 /* zero */ false, /* guarded */ false, /* frequent_reuse */
260 false, &deferred_work_generated);
261 expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
262 }
263 extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
264 /* guarded */ false, /* frequent_reuse */ false,
265 &deferred_work_generated);
266 expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
267 size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
268 expect_zu_le(NALLOCS + 1, max_allocs,
269 "Incorrect number of allocations");
270 expect_zu_eq(0, ta.dalloc_count,
271 "Incorrect number of allocations");
272 /* Free until the SEC is full, but should not have flushed yet. */
273 for (int i = 0; i < NALLOCS; i++) {
274 pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
275 }
276 expect_zu_le(NALLOCS + 1, max_allocs,
277 "Incorrect number of allocations");
278 expect_zu_eq(0, ta.dalloc_count,
279 "Incorrect number of allocations");
280 /*
281 * Free the extra allocation; this should trigger a flush. The internal
282 * flushing logic is allowed to get complicated; for now, we rely on our
283 * whitebox knowledge of the fact that the SEC flushes bins in their
284 * entirety when it decides to do so, and it has only one bin active
285 * right now.
286 */
287 pai_dalloc(tsdn, &sec.pai, extra_alloc, &deferred_work_generated);
288 expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
289 "Incorrect number of allocations");
290 expect_zu_eq(0, ta.dalloc_count,
291 "Incorrect number of (non-batch) deallocations");
292 expect_zu_eq(NALLOCS + 1, ta.dalloc_batch_count,
293 "Incorrect number of batch deallocations");
294}
295TEST_END
296
297/*
298 * A disable and a flush are *almost* equivalent; the only difference is what
299 * happens afterwards; disabling disallows all future caching as well.
300 */
301static void
302do_disable_flush_test(bool is_disable) {
303 pai_test_allocator_t ta;
304 pai_test_allocator_init(&ta);
305 sec_t sec;
306 /* See the note above -- we can't use the real tsd. */
307 tsdn_t *tsdn = TSDN_NULL;
308
309 enum { NALLOCS = 11 };
310 edata_t *allocs[NALLOCS];
311 bool deferred_work_generated = false;
312 test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
313 /* max_bytes */ NALLOCS * PAGE);
314 for (int i = 0; i < NALLOCS; i++) {
315 allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
316 /* zero */ false, /* guarded */ false, /* frequent_reuse */
317 false, &deferred_work_generated);
318 expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
319 }
320 /* Free all but the last aloc. */
321 for (int i = 0; i < NALLOCS - 1; i++) {
322 pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
323 }
324 size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
325
326 expect_zu_le(NALLOCS, max_allocs, "Incorrect number of allocations");
327 expect_zu_eq(0, ta.dalloc_count,
328 "Incorrect number of allocations");
329
330 if (is_disable) {
331 sec_disable(tsdn, &sec);
332 } else {
333 sec_flush(tsdn, &sec);
334 }
335
336 expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
337 "Incorrect number of allocations");
338 expect_zu_eq(0, ta.dalloc_count,
339 "Incorrect number of (non-batch) deallocations");
340 expect_zu_le(NALLOCS - 1, ta.dalloc_batch_count,
341 "Incorrect number of batch deallocations");
342 size_t old_dalloc_batch_count = ta.dalloc_batch_count;
343
344 /*
345 * If we free into a disabled SEC, it should forward to the fallback.
346 * Otherwise, the SEC should accept the allocation.
347 */
348 pai_dalloc(tsdn, &sec.pai, allocs[NALLOCS - 1],
349 &deferred_work_generated);
350
351 expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
352 "Incorrect number of allocations");
353 expect_zu_eq(is_disable ? 1 : 0, ta.dalloc_count,
354 "Incorrect number of (non-batch) deallocations");
355 expect_zu_eq(old_dalloc_batch_count, ta.dalloc_batch_count,
356 "Incorrect number of batch deallocations");
357}
358
359TEST_BEGIN(test_disable) {
360 do_disable_flush_test(/* is_disable */ true);
361}
362TEST_END
363
364TEST_BEGIN(test_flush) {
365 do_disable_flush_test(/* is_disable */ false);
366}
367TEST_END
368
369TEST_BEGIN(test_max_alloc_respected) {
370 pai_test_allocator_t ta;
371 pai_test_allocator_init(&ta);
372 sec_t sec;
373 /* See the note above -- we can't use the real tsd. */
374 tsdn_t *tsdn = TSDN_NULL;
375
376 size_t max_alloc = 2 * PAGE;
377 size_t attempted_alloc = 3 * PAGE;
378
379 bool deferred_work_generated = false;
380
381 test_sec_init(&sec, &ta.pai, /* nshards */ 1, max_alloc,
382 /* max_bytes */ 1000 * PAGE);
383
384 for (size_t i = 0; i < 100; i++) {
385 expect_zu_eq(i, ta.alloc_count,
386 "Incorrect number of allocations");
387 expect_zu_eq(i, ta.dalloc_count,
388 "Incorrect number of deallocations");
389 edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
390 PAGE, /* zero */ false, /* guarded */ false,
391 /* frequent_reuse */ false, &deferred_work_generated);
392 expect_ptr_not_null(edata, "Unexpected alloc failure");
393 expect_zu_eq(i + 1, ta.alloc_count,
394 "Incorrect number of allocations");
395 expect_zu_eq(i, ta.dalloc_count,
396 "Incorrect number of deallocations");
397 pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
398 }
399}
400TEST_END
401
TEST_BEGIN(test_expand_shrink_delegate) {
	/*
	 * Expand and shrink shouldn't affect sec state; they should just
	 * delegate to the fallback PAI.
	 */
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	bool deferred_work_generated = false;

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
	    /* max_bytes */ 1000 * PAGE);
	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	expect_ptr_not_null(edata, "Unexpected alloc failure");

	/* Successful expand: fallback's expand_count must tick. */
	bool err = pai_expand(tsdn, &sec.pai, edata, PAGE, 4 * PAGE,
	    /* zero */ false, &deferred_work_generated);
	expect_false(err, "Unexpected expand failure");
	expect_zu_eq(1, ta.expand_count, "");
	/* Failing expand: error propagates, call still delegated. */
	ta.expand_return_value = true;
	err = pai_expand(tsdn, &sec.pai, edata, 4 * PAGE, 3 * PAGE,
	    /* zero */ false, &deferred_work_generated);
	expect_true(err, "Unexpected expand success");
	expect_zu_eq(2, ta.expand_count, "");

	/* Same pair of checks for shrink. */
	err = pai_shrink(tsdn, &sec.pai, edata, 4 * PAGE, 2 * PAGE,
	    &deferred_work_generated);
	expect_false(err, "Unexpected shrink failure");
	expect_zu_eq(1, ta.shrink_count, "");
	ta.shrink_return_value = true;
	err = pai_shrink(tsdn, &sec.pai, edata, 2 * PAGE, PAGE,
	    &deferred_work_generated);
	expect_true(err, "Unexpected shrink success");
	expect_zu_eq(2, ta.shrink_count, "");
}
TEST_END
443
444TEST_BEGIN(test_nshards_0) {
445 pai_test_allocator_t ta;
446 pai_test_allocator_init(&ta);
447 sec_t sec;
448 /* See the note above -- we can't use the real tsd. */
449 tsdn_t *tsdn = TSDN_NULL;
450 base_t *base = base_new(TSDN_NULL, /* ind */ 123,
451 &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
452
453 sec_opts_t opts = SEC_OPTS_DEFAULT;
454 opts.nshards = 0;
455 sec_init(TSDN_NULL, &sec, base, &ta.pai, &opts);
456
457 bool deferred_work_generated = false;
458 edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
459 /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
460 &deferred_work_generated);
461 pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
462
463 /* Both operations should have gone directly to the fallback. */
464 expect_zu_eq(1, ta.alloc_count, "");
465 expect_zu_eq(1, ta.dalloc_count, "");
466}
467TEST_END
468
/*
 * Assert that merged sec stats report at least npages worth of cached
 * bytes.  The field is pre-seeded with 123 so the check also proves
 * sec_stats_merge() accumulates into the caller's value rather than
 * overwriting it.
 */
static void
expect_stats_pages(tsdn_t *tsdn, sec_t *sec, size_t npages) {
	sec_stats_t stats;
	/*
	 * Check that the stats merging accumulates rather than overwrites by
	 * putting some (made up) data there to begin with.
	 */
	stats.bytes = 123;
	sec_stats_merge(tsdn, sec, &stats);
	/* Lower bound only: seed (123) plus the expected cached bytes. */
	assert_zu_le(npages * PAGE + 123, stats.bytes, "");
}
480
/*
 * Cached-byte stats should track dalloc/alloc churn exactly while the
 * cache stays below its flush threshold.
 */
TEST_BEGIN(test_stats_simple) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;

	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		NITERS = 100,
		FLUSH_PAGES = 20,
	};

	bool deferred_work_generated = false;

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	/* Fresh allocations come from the fallback: nothing cached yet. */
	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, 0);
	}

	/* Increase and decrease, without flushing. */
	for (size_t i = 0; i < NITERS; i++) {
		/* Each dalloc grows the cache by one page... */
		for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
			pai_dalloc(tsdn, &sec.pai, allocs[j],
			    &deferred_work_generated);
			expect_stats_pages(tsdn, &sec, j + 1);
		}
		/* ...and each re-alloc is served from it, shrinking it. */
		for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
			allocs[j] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
			    /* zero */ false, /* guarded */ false,
			    /* frequent_reuse */ false,
			    &deferred_work_generated);
			expect_stats_pages(tsdn, &sec, FLUSH_PAGES / 2 - j - 1);
		}
	}
}
TEST_END
524
/*
 * Drive the cache past its max_bytes limit so that it auto-flushes, and
 * verify the stats still balance against the fallback's counters.
 */
TEST_BEGIN(test_stats_auto_flush) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;

	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		FLUSH_PAGES = 10,
	};

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	edata_t *extra_alloc0;
	edata_t *extra_alloc1;
	edata_t *allocs[2 * FLUSH_PAGES];

	bool deferred_work_generated = false;

	extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);

	for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
	}

	/* FLUSH_PAGES dallocs fill the cache to its limit... */
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
	}
	/* ...and this one pushes it over, triggering an auto-flush. */
	pai_dalloc(tsdn, &sec.pai, extra_alloc0, &deferred_work_generated);

	/* Flush the remaining pages; stats should still work. */
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES + i],
		    &deferred_work_generated);
	}

	pai_dalloc(tsdn, &sec.pai, extra_alloc1, &deferred_work_generated);

	/*
	 * Whatever the flush pattern was, the sec's cached bytes must equal
	 * what the fallback handed out minus what it got back.
	 */
	expect_stats_pages(tsdn, &sec, ta.alloc_count + ta.alloc_batch_count
	    - ta.dalloc_count - ta.dalloc_batch_count);
}
TEST_END
576
/*
 * Explicit sec_flush() and sec_disable() must both empty the cache and
 * drop the reported cached-page stats back to zero.
 */
TEST_BEGIN(test_stats_manual_flush) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;

	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		FLUSH_PAGES = 10,
	};

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	bool deferred_work_generated = false;
	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, 0);
	}

	/* Dalloc the first half of the allocations. */
	for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, i + 1);
	}

	/* Manual flush empties the cache. */
	sec_flush(tsdn, &sec);
	expect_stats_pages(tsdn, &sec, 0);

	/* Flush the remaining pages. */
	for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES / 2 + i],
		    &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, i + 1);
	}
	/* Disable also flushes. */
	sec_disable(tsdn, &sec);
	expect_stats_pages(tsdn, &sec, 0);
}
TEST_END
620
/* Run the sec test suite through the jemalloc test harness. */
int
main(void) {
	return test(
	    test_reuse,
	    test_auto_flush,
	    test_disable,
	    test_flush,
	    test_max_alloc_respected,
	    test_expand_shrink_delegate,
	    test_nshards_0,
	    test_stats_simple,
	    test_stats_auto_flush,
	    test_stats_manual_flush);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/seq.c b/examples/redis-unstable/deps/jemalloc/test/unit/seq.c
deleted file mode 100644
index 06ed683..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/seq.c
+++ /dev/null
@@ -1,95 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/seq.h"
4
/* Payload protected by the seqlock; all slots hold the same value. */
typedef struct data_s data_t;
struct data_s {
	int arr[10];
};
9
10static void
11set_data(data_t *data, int num) {
12 for (int i = 0; i < 10; i++) {
13 data->arr[i] = num;
14 }
15}
16
/* A torn read would mix values; assert every slot matches slot 0. */
static void
expect_data(data_t *data) {
	int num = data->arr[0];
	for (int i = 0; i < 10; i++) {
		expect_d_eq(num, data->arr[i], "Data consistency error");
	}
}
24
/* Instantiate the seqlock wrapper type seq_data_t for data_t. */
seq_define(data_t, data)

/* State shared between the reader and writer threads. */
typedef struct thd_data_s thd_data_t;
struct thd_data_s {
	seq_data_t data;
};
31
/*
 * Reader thread: spin on seq_try_load until the writer's last value
 * (1000*1000 - 1) is observed.  Failed loads (writer racing) are simply
 * retried; successful loads must be untorn and monotonically increasing.
 */
static void *
seq_reader_thd(void *arg) {
	thd_data_t *thd_data = (thd_data_t *)arg;
	int iter = 0;
	data_t local_data;
	while (iter < 1000 * 1000 - 1) {
		bool success = seq_try_load_data(&local_data, &thd_data->data);
		if (success) {
			expect_data(&local_data);
			/* Seqlock reads may repeat a value, never regress. */
			expect_d_le(iter, local_data.arr[0],
			    "Seq read went back in time.");
			iter = local_data.arr[0];
		}
	}
	return NULL;
}
48
49static void *
50seq_writer_thd(void *arg) {
51 thd_data_t *thd_data = (thd_data_t *)arg;
52 data_t local_data;
53 memset(&local_data, 0, sizeof(local_data));
54 for (int i = 0; i < 1000 * 1000; i++) {
55 set_data(&local_data, i);
56 seq_store_data(&thd_data->data, &local_data);
57 }
58 return NULL;
59}
60
/* One reader racing one writer; reader validates untorn, ordered data. */
TEST_BEGIN(test_seq_threaded) {
	thd_data_t thd_data;
	memset(&thd_data, 0, sizeof(thd_data));

	thd_t reader;
	thd_t writer;

	thd_create(&reader, seq_reader_thd, &thd_data);
	thd_create(&writer, seq_writer_thd, &thd_data);

	thd_join(reader, NULL);
	thd_join(writer, NULL);
}
TEST_END
75
/* Single-threaded sanity: a non-racing load always succeeds and round-trips. */
TEST_BEGIN(test_seq_simple) {
	data_t data;
	seq_data_t seq;
	memset(&seq, 0, sizeof(seq));
	for (int i = 0; i < 1000 * 1000; i++) {
		set_data(&data, i);
		seq_store_data(&seq, &data);
		/* Clobber the local copy to prove the load refills it. */
		set_data(&data, 0);
		bool success = seq_try_load_data(&data, &seq);
		expect_b_eq(success, true, "Failed non-racing read");
		expect_data(&data);
	}
}
TEST_END
90
/* no_reentrancy: these tests spin/iterate heavily and spawn threads. */
int main(void) {
	return test_no_reentrancy(
	    test_seq_simple,
	    test_seq_threaded);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/size_check.c b/examples/redis-unstable/deps/jemalloc/test/unit/size_check.c
deleted file mode 100644
index accdc40..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/size_check.c
+++ /dev/null
@@ -1,79 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/safety_check.h"
4
/* Stand-in abort hook: records the safety-check firing instead of dying. */
bool fake_abort_called;
void fake_abort(const char *message) {
	(void)message;
	fake_abort_called = true;
}
10
/* One correct size and one deliberately wrong size per size-class tier. */
#define SMALL_SIZE1 SC_SMALL_MAXCLASS
#define SMALL_SIZE2 (SC_SMALL_MAXCLASS / 2)

#define LARGE_SIZE1 SC_LARGE_MINCLASS
#define LARGE_SIZE2 (LARGE_SIZE1 * 2)
16
/*
 * Arm the fake abort hook and allocate sz bytes; returns the pointer the
 * caller will free with a mismatched size.
 */
void *
test_invalid_size_pre(size_t sz) {
	safety_check_set_abort(&fake_abort);

	fake_abort_called = false;
	void *ptr = malloc(sz);
	assert_ptr_not_null(ptr, "Unexpected failure");

	return ptr;
}
27
/* Verify the safety check fired, then restore the default abort hook. */
void
test_invalid_size_post(void) {
	expect_true(fake_abort_called, "Safety check didn't fire");
	safety_check_set_abort(NULL);
}
33
/* sdallocx with a wrong size must trip the check for small and large. */
TEST_BEGIN(test_invalid_size_sdallocx) {
	test_skip_if(!config_opt_size_checks);

	void *ptr = test_invalid_size_pre(SMALL_SIZE1);
	sdallocx(ptr, SMALL_SIZE2, 0);
	test_invalid_size_post();

	ptr = test_invalid_size_pre(LARGE_SIZE1);
	sdallocx(ptr, LARGE_SIZE2, 0);
	test_invalid_size_post();
}
TEST_END
46
/* Same as above, but through the flagged (tcache-bypassing) path. */
TEST_BEGIN(test_invalid_size_sdallocx_nonzero_flag) {
	test_skip_if(!config_opt_size_checks);

	void *ptr = test_invalid_size_pre(SMALL_SIZE1);
	sdallocx(ptr, SMALL_SIZE2, MALLOCX_TCACHE_NONE);
	test_invalid_size_post();

	ptr = test_invalid_size_pre(LARGE_SIZE1);
	sdallocx(ptr, LARGE_SIZE2, MALLOCX_TCACHE_NONE);
	test_invalid_size_post();
}
TEST_END
59
/* Same check through the internal no-flags fast path. */
TEST_BEGIN(test_invalid_size_sdallocx_noflags) {
	test_skip_if(!config_opt_size_checks);

	void *ptr = test_invalid_size_pre(SMALL_SIZE1);
	je_sdallocx_noflags(ptr, SMALL_SIZE2);
	test_invalid_size_post();

	ptr = test_invalid_size_pre(LARGE_SIZE1);
	je_sdallocx_noflags(ptr, LARGE_SIZE2);
	test_invalid_size_post();
}
TEST_END
72
/* Run all sdallocx size-mismatch variants. */
int
main(void) {
	return test(
	    test_invalid_size_sdallocx,
	    test_invalid_size_sdallocx_nonzero_flag,
	    test_invalid_size_sdallocx_noflags);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/size_check.sh b/examples/redis-unstable/deps/jemalloc/test/unit/size_check.sh
deleted file mode 100644
index 352d110..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/size_check.sh
+++ /dev/null
@@ -1,5 +0,0 @@
#!/bin/sh

# Run the size_check test with profiling off when the build enables it
# (prof is disabled via MALLOC_CONF only in that configuration).
if [ "x${enable_prof}" = "x1" ] ; then
  export MALLOC_CONF="prof:false"
fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/size_classes.c b/examples/redis-unstable/deps/jemalloc/test/unit/size_classes.c
deleted file mode 100644
index c70eb59..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/size_classes.c
+++ /dev/null
@@ -1,188 +0,0 @@
1#include "test/jemalloc_test.h"
2
/*
 * Query mallctl for the size of the largest large-extent size class
 * (arenas.lextent.<nlextents-1>.size).
 */
static size_t
get_max_size_class(void) {
	unsigned nlextents;
	size_t mib[4];
	size_t sz, miblen, max_size_class;

	sz = sizeof(unsigned);
	expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
	    0), 0, "Unexpected mallctl() error");

	miblen = sizeof(mib) / sizeof(size_t);
	expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() error");
	/* Point the mib at the last (largest) lextent class. */
	mib[2] = nlextents - 1;

	sz = sizeof(size_t);
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
	    NULL, 0), 0, "Unexpected mallctlbymib() error");

	return max_size_class;
}
24
/*
 * Exhaustively verify that sz_size2index()/sz_index2size() are inverses
 * and that sz_s2u() rounds every size up to its proper size class.
 */
TEST_BEGIN(test_size_classes) {
	size_t size_class, max_size_class;
	szind_t index, max_index;

	max_size_class = get_max_size_class();
	max_index = sz_size2index(max_size_class);

	/* Both termination conditions must flip simultaneously. */
	for (index = 0, size_class = sz_index2size(index); index < max_index ||
	    size_class < max_size_class; index++, size_class =
	    sz_index2size(index)) {
		expect_true(index < max_index,
		    "Loop conditionals should be equivalent; index=%u, "
		    "size_class=%zu (%#zx)", index, size_class, size_class);
		expect_true(size_class < max_size_class,
		    "Loop conditionals should be equivalent; index=%u, "
		    "size_class=%zu (%#zx)", index, size_class, size_class);

		expect_u_eq(index, sz_size2index(size_class),
		    "sz_size2index() does not reverse sz_index2size(): index=%u"
		    " --> size_class=%zu --> index=%u --> size_class=%zu",
		    index, size_class, sz_size2index(size_class),
		    sz_index2size(sz_size2index(size_class)));
		expect_zu_eq(size_class,
		    sz_index2size(sz_size2index(size_class)),
		    "sz_index2size() does not reverse sz_size2index(): index=%u"
		    " --> size_class=%zu --> index=%u --> size_class=%zu",
		    index, size_class, sz_size2index(size_class),
		    sz_index2size(sz_size2index(size_class)));

		/* One byte over a class boundary lands in the next class. */
		expect_u_eq(index+1, sz_size2index(size_class+1),
		    "Next size_class does not round up properly");

		expect_zu_eq(size_class, (index > 0) ?
		    sz_s2u(sz_index2size(index-1)+1) : sz_s2u(1),
		    "sz_s2u() does not round up to size class");
		expect_zu_eq(size_class, sz_s2u(size_class-1),
		    "sz_s2u() does not round up to size class");
		expect_zu_eq(size_class, sz_s2u(size_class),
		    "sz_s2u() does not compute same size class");
		expect_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1),
		    "sz_s2u() does not round up to next size class");
	}

	/* Post-loop: index == max_index, size_class == max_size_class. */
	expect_u_eq(index, sz_size2index(sz_index2size(index)),
	    "sz_size2index() does not reverse sz_index2size()");
	expect_zu_eq(max_size_class, sz_index2size(
	    sz_size2index(max_size_class)),
	    "sz_index2size() does not reverse sz_size2index()");

	expect_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1),
	    "sz_s2u() does not round up to size class");
	expect_zu_eq(size_class, sz_s2u(size_class-1),
	    "sz_s2u() does not round up to size class");
	expect_zu_eq(size_class, sz_s2u(size_class),
	    "sz_s2u() does not compute same size class");
}
TEST_END
82
/*
 * Same inverse/round-up checks as test_size_classes, but for the
 * page-size class mappings sz_psz2ind()/sz_pind2sz()/sz_psz2u().
 */
TEST_BEGIN(test_psize_classes) {
	size_t size_class, max_psz;
	pszind_t pind, max_pind;

	max_psz = get_max_size_class() + PAGE;
	max_pind = sz_psz2ind(max_psz);

	for (pind = 0, size_class = sz_pind2sz(pind);
	    pind < max_pind || size_class < max_psz;
	    pind++, size_class = sz_pind2sz(pind)) {
		expect_true(pind < max_pind,
		    "Loop conditionals should be equivalent; pind=%u, "
		    "size_class=%zu (%#zx)", pind, size_class, size_class);
		expect_true(size_class < max_psz,
		    "Loop conditionals should be equivalent; pind=%u, "
		    "size_class=%zu (%#zx)", pind, size_class, size_class);

		expect_u_eq(pind, sz_psz2ind(size_class),
		    "sz_psz2ind() does not reverse sz_pind2sz(): pind=%u -->"
		    " size_class=%zu --> pind=%u --> size_class=%zu", pind,
		    size_class, sz_psz2ind(size_class),
		    sz_pind2sz(sz_psz2ind(size_class)));
		expect_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)),
		    "sz_pind2sz() does not reverse sz_psz2ind(): pind=%u -->"
		    " size_class=%zu --> pind=%u --> size_class=%zu", pind,
		    size_class, sz_psz2ind(size_class),
		    sz_pind2sz(sz_psz2ind(size_class)));

		/* The largest class has no successor; it maps to NPSIZES. */
		if (size_class == SC_LARGE_MAXCLASS) {
			expect_u_eq(SC_NPSIZES, sz_psz2ind(size_class + 1),
			    "Next size_class does not round up properly");
		} else {
			expect_u_eq(pind + 1, sz_psz2ind(size_class + 1),
			    "Next size_class does not round up properly");
		}

		expect_zu_eq(size_class, (pind > 0) ?
		    sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1),
		    "sz_psz2u() does not round up to size class");
		expect_zu_eq(size_class, sz_psz2u(size_class-1),
		    "sz_psz2u() does not round up to size class");
		expect_zu_eq(size_class, sz_psz2u(size_class),
		    "sz_psz2u() does not compute same size class");
		expect_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1),
		    "sz_psz2u() does not round up to next size class");
	}

	expect_u_eq(pind, sz_psz2ind(sz_pind2sz(pind)),
	    "sz_psz2ind() does not reverse sz_pind2sz()");
	expect_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)),
	    "sz_pind2sz() does not reverse sz_psz2ind()");

	expect_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1),
	    "sz_psz2u() does not round up to size class");
	expect_zu_eq(size_class, sz_psz2u(size_class-1),
	    "sz_psz2u() does not round up to size class");
	expect_zu_eq(size_class, sz_psz2u(size_class),
	    "sz_psz2u() does not compute same size class");
}
TEST_END
143
/*
 * Sizes beyond the largest class (including SIZE_T_MAX and values just
 * past PTRDIFF_MAX) must map to the documented overflow sentinels.
 */
TEST_BEGIN(test_overflow) {
	size_t max_size_class, max_psz;

	max_size_class = get_max_size_class();
	max_psz = max_size_class + PAGE;

	expect_u_eq(sz_size2index(max_size_class+1), SC_NSIZES,
	    "sz_size2index() should return NSIZES on overflow");
	expect_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), SC_NSIZES,
	    "sz_size2index() should return NSIZES on overflow");
	expect_u_eq(sz_size2index(SIZE_T_MAX), SC_NSIZES,
	    "sz_size2index() should return NSIZES on overflow");

	expect_zu_eq(sz_s2u(max_size_class+1), 0,
	    "sz_s2u() should return 0 for unsupported size");
	expect_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0,
	    "sz_s2u() should return 0 for unsupported size");
	expect_zu_eq(sz_s2u(SIZE_T_MAX), 0,
	    "sz_s2u() should return 0 on overflow");

	expect_u_eq(sz_psz2ind(max_size_class+1), SC_NPSIZES,
	    "sz_psz2ind() should return NPSIZES on overflow");
	expect_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), SC_NPSIZES,
	    "sz_psz2ind() should return NPSIZES on overflow");
	expect_u_eq(sz_psz2ind(SIZE_T_MAX), SC_NPSIZES,
	    "sz_psz2ind() should return NPSIZES on overflow");

	expect_zu_eq(sz_psz2u(max_size_class+1), max_psz,
	    "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported"
	    " size");
	expect_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz,
	    "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported "
	    "size");
	expect_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz,
	    "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow");
}
TEST_END
181
/* Run the size-class mapping tests. */
int
main(void) {
	return test(
	    test_size_classes,
	    test_psize_classes,
	    test_overflow);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/slab.c b/examples/redis-unstable/deps/jemalloc/test/unit/slab.c
deleted file mode 100644
index 70fc5c7..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/slab.c
+++ /dev/null
@@ -1,39 +0,0 @@
1#include "test/jemalloc_test.h"
2
/* Sentinel arena index: the slab below is never owned by a real arena. */
#define INVALID_ARENA_IND ((1U << MALLOCX_ARENA_BITS) - 1)

/*
 * For every small bin, build a fake slab over a real page-aligned
 * allocation and check that arena_slab_regind() recovers each region's
 * index from its address.
 */
TEST_BEGIN(test_arena_slab_regind) {
	szind_t binind;

	for (binind = 0; binind < SC_NBINS; binind++) {
		size_t regind;
		edata_t slab;
		const bin_info_t *bin_info = &bin_infos[binind];
		edata_init(&slab, INVALID_ARENA_IND,
		    mallocx(bin_info->slab_size, MALLOCX_LG_ALIGN(LG_PAGE)),
		    bin_info->slab_size, true,
		    binind, 0, extent_state_active, false, true, EXTENT_PAI_PAC,
		    EXTENT_NOT_HEAD);
		expect_ptr_not_null(edata_addr_get(&slab),
		    "Unexpected malloc() failure");
		arena_dalloc_bin_locked_info_t dalloc_info;
		arena_dalloc_bin_locked_begin(&dalloc_info, binind);
		for (regind = 0; regind < bin_info->nregs; regind++) {
			/* Region address = slab base + regind * reg_size. */
			void *reg = (void *)((uintptr_t)edata_addr_get(&slab) +
			    (bin_info->reg_size * regind));
			expect_zu_eq(arena_slab_regind(&dalloc_info, binind,
			    &slab, reg),
			    regind,
			    "Incorrect region index computed for size %zu",
			    bin_info->reg_size);
		}
		free(edata_addr_get(&slab));
	}
}
TEST_END
34
/* Run the slab region-index test. */
int
main(void) {
	return test(
	    test_arena_slab_regind);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/smoothstep.c b/examples/redis-unstable/deps/jemalloc/test/unit/smoothstep.c
deleted file mode 100644
index 588c9f4..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/smoothstep.c
+++ /dev/null
@@ -1,102 +0,0 @@
1#include "test/jemalloc_test.h"
2
/* Expand the SMOOTHSTEP x-macro into a table of fixed-point h values. */
static const uint64_t smoothstep_tab[] = {
#define STEP(step, h, x, y)			\
	h,
	SMOOTHSTEP
#undef STEP
};
9
TEST_BEGIN(test_smoothstep_integral) {
	uint64_t sum, min, max;
	unsigned i;

	/*
	 * The integral of smoothstep in the [0..1] range equals 1/2.  Verify
	 * that the fixed point representation's integral is no more than
	 * rounding error distant from 1/2.  Regarding rounding, each table
	 * element is rounded down to the nearest fixed point value, so the
	 * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += smoothstep_tab[i];
	}

	/* Exact 1/2, scaled by the step count, in fixed point. */
	max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
	min = max - SMOOTHSTEP_NSTEPS;

	expect_u64_ge(sum, min,
	    "Integral too small, even accounting for truncation");
	expect_u64_le(sum, max, "Integral exceeds 1/2");
	/* Diagnostic printout; flip to true when tuning the table. */
	if (false) {
		malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n",
		    max - sum, SMOOTHSTEP_NSTEPS);
	}
}
TEST_END
38
TEST_BEGIN(test_smoothstep_monotonic) {
	uint64_t prev_h;
	unsigned i;

	/*
	 * The smoothstep function is monotonic in [0..1], i.e. its slope is
	 * non-negative.  In practice we want to parametrize table generation
	 * such that piecewise slope is greater than zero, but do not require
	 * that here.
	 */
	prev_h = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
		uint64_t h = smoothstep_tab[i];
		expect_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
		prev_h = h;
	}
	/* The table must end exactly at 1.0 in fixed point. */
	expect_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
	    (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1");
}
TEST_END
59
TEST_BEGIN(test_smoothstep_slope) {
	uint64_t prev_h, prev_delta;
	unsigned i;

	/*
	 * The smoothstep slope strictly increases until x=0.5, and then
	 * strictly decreases until x=1.0.  Verify the slightly weaker
	 * requirement of monotonicity, so that inadequate table precision does
	 * not cause false test failures.
	 */
	/* First half: deltas between consecutive entries must not shrink. */
	prev_h = 0;
	prev_delta = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) {
		uint64_t h = smoothstep_tab[i];
		uint64_t delta = h - prev_h;
		expect_u64_ge(delta, prev_delta,
		    "Slope must monotonically increase in 0.0 <= x <= 0.5, "
		    "i=%u", i);
		prev_h = h;
		prev_delta = delta;
	}

	/* Second half, walked backwards from 1.0: deltas must not shrink. */
	prev_h = KQU(1) << SMOOTHSTEP_BFP;
	prev_delta = 0;
	for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) {
		uint64_t h = smoothstep_tab[i];
		uint64_t delta = prev_h - h;
		expect_u64_ge(delta, prev_delta,
		    "Slope must monotonically decrease in 0.5 <= x <= 1.0, "
		    "i=%u", i);
		prev_h = h;
		prev_delta = delta;
	}
}
TEST_END
95
/* Run the smoothstep table tests. */
int
main(void) {
	return test(
	    test_smoothstep_integral,
	    test_smoothstep_monotonic,
	    test_smoothstep_slope);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/spin.c b/examples/redis-unstable/deps/jemalloc/test/unit/spin.c
deleted file mode 100644
index b965f74..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/spin.c
+++ /dev/null
@@ -1,18 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/spin.h"
4
/* Smoke test: adaptive spinning must not crash or hang for 100 rounds. */
TEST_BEGIN(test_spin) {
	spin_t spinner = SPIN_INITIALIZER;

	for (unsigned i = 0; i < 100; i++) {
		spin_adaptive(&spinner);
	}
}
TEST_END
13
/* Run the spin test. */
int
main(void) {
	return test(
	    test_spin);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/stats.c b/examples/redis-unstable/deps/jemalloc/test/unit/stats.c
deleted file mode 100644
index bbdbd18..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/stats.c
+++ /dev/null
@@ -1,431 +0,0 @@
1#include "test/jemalloc_test.h"
2
/* Two-level stringification so macro arguments expand before the '#'. */
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
5
/*
 * The top-level stats must exist (when stats are compiled in) and obey
 * allocated <= active < resident and active < mapped.
 */
TEST_BEGIN(test_stats_summary) {
	size_t sz, allocated, active, resident, mapped;
	/* Without --enable-stats these mallctl nodes return ENOENT. */
	int expected = config_stats ? 0 : ENOENT;

	sz = sizeof(size_t);
	expect_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
	    0), expected, "Unexpected mallctl() result");
	expect_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
	    expected, "Unexpected mallctl() result");
	expect_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
	    expected, "Unexpected mallctl() result");
	expect_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
	    expected, "Unexpected mallctl() result");

	if (config_stats) {
		expect_zu_le(allocated, active,
		    "allocated should be no larger than active");
		expect_zu_lt(active, resident,
		    "active should be less than resident");
		expect_zu_lt(active, mapped,
		    "active should be less than mapped");
	}
}
TEST_END
30
/*
 * Allocate one large object in arena 0 and check that the arena's large
 * stats are populated and internally consistent.
 */
TEST_BEGIN(test_stats_large) {
	void *p;
	uint64_t epoch;
	size_t allocated;
	uint64_t nmalloc, ndalloc, nrequests;
	size_t sz;
	int expected = config_stats ? 0 : ENOENT;

	/* One byte past small maxclass forces a large allocation. */
	p = mallocx(SC_SMALL_MAXCLASS + 1, MALLOCX_ARENA(0));
	expect_ptr_not_null(p, "Unexpected mallocx() failure");

	/* Stats are cached; bump the epoch to refresh them. */
	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
	    0, "Unexpected mallctl() failure");

	sz = sizeof(size_t);
	expect_d_eq(mallctl("stats.arenas.0.large.allocated",
	    (void *)&allocated, &sz, NULL, 0), expected,
	    "Unexpected mallctl() result");
	sz = sizeof(uint64_t);
	expect_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
	expect_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
	expect_d_eq(mallctl("stats.arenas.0.large.nrequests",
	    (void *)&nrequests, &sz, NULL, 0), expected,
	    "Unexpected mallctl() result");

	if (config_stats) {
		expect_zu_gt(allocated, 0,
		    "allocated should be greater than zero");
		expect_u64_ge(nmalloc, ndalloc,
		    "nmalloc should be at least as large as ndalloc");
		expect_u64_le(nmalloc, nrequests,
		    "nmalloc should no larger than nrequests");
	}

	dallocx(p, 0);
}
TEST_END
70
71TEST_BEGIN(test_stats_arenas_summary) {
72 void *little, *large;
73 uint64_t epoch;
74 size_t sz;
75 int expected = config_stats ? 0 : ENOENT;
76 size_t mapped;
77 uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
78 uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
79
80 little = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
81 expect_ptr_not_null(little, "Unexpected mallocx() failure");
82 large = mallocx((1U << SC_LG_LARGE_MINCLASS),
83 MALLOCX_ARENA(0));
84 expect_ptr_not_null(large, "Unexpected mallocx() failure");
85
86 dallocx(little, 0);
87 dallocx(large, 0);
88
89 expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
90 opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
91 expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
92 "Unexpected mallctl() failure");
93
94 expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
95 0, "Unexpected mallctl() failure");
96
97 sz = sizeof(size_t);
98 expect_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
99 0), expected, "Unexepected mallctl() result");
100
101 sz = sizeof(uint64_t);
102 expect_d_eq(mallctl("stats.arenas.0.dirty_npurge",
103 (void *)&dirty_npurge, &sz, NULL, 0), expected,
104 "Unexepected mallctl() result");
105 expect_d_eq(mallctl("stats.arenas.0.dirty_nmadvise",
106 (void *)&dirty_nmadvise, &sz, NULL, 0), expected,
107 "Unexepected mallctl() result");
108 expect_d_eq(mallctl("stats.arenas.0.dirty_purged",
109 (void *)&dirty_purged, &sz, NULL, 0), expected,
110 "Unexepected mallctl() result");
111 expect_d_eq(mallctl("stats.arenas.0.muzzy_npurge",
112 (void *)&muzzy_npurge, &sz, NULL, 0), expected,
113 "Unexepected mallctl() result");
114 expect_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise",
115 (void *)&muzzy_nmadvise, &sz, NULL, 0), expected,
116 "Unexepected mallctl() result");
117 expect_d_eq(mallctl("stats.arenas.0.muzzy_purged",
118 (void *)&muzzy_purged, &sz, NULL, 0), expected,
119 "Unexepected mallctl() result");
120
121 if (config_stats) {
122 if (!is_background_thread_enabled() && !opt_hpa) {
123 expect_u64_gt(dirty_npurge + muzzy_npurge, 0,
124 "At least one purge should have occurred");
125 }
126 expect_u64_le(dirty_nmadvise, dirty_purged,
127 "dirty_nmadvise should be no greater than dirty_purged");
128 expect_u64_le(muzzy_nmadvise, muzzy_purged,
129 "muzzy_nmadvise should be no greater than muzzy_purged");
130 }
131}
132TEST_END
133
/* No-op thread body used by no_lazy_lock(); the argument is ignored. */
void *
thd_start(void *arg) {
	(void)arg;	/* Unused; silence -Wunused-parameter. */
	return NULL;
}
138
/*
 * Spawn and join a trivial thread so jemalloc switches out of
 * single-threaded (lazy-lock) mode before the tcache-sensitive tests.
 */
static void
no_lazy_lock(void) {
	thd_t thd;

	thd_create(&thd, thd_start, NULL);
	thd_join(thd, NULL);
}
146
147TEST_BEGIN(test_stats_arenas_small) {
148 void *p;
149 size_t sz, allocated;
150 uint64_t epoch, nmalloc, ndalloc, nrequests;
151 int expected = config_stats ? 0 : ENOENT;
152
153 no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
154
155 p = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
156 expect_ptr_not_null(p, "Unexpected mallocx() failure");
157
158 expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
159 opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
160
161 expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
162 0, "Unexpected mallctl() failure");
163
164 sz = sizeof(size_t);
165 expect_d_eq(mallctl("stats.arenas.0.small.allocated",
166 (void *)&allocated, &sz, NULL, 0), expected,
167 "Unexpected mallctl() result");
168 sz = sizeof(uint64_t);
169 expect_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc,
170 &sz, NULL, 0), expected, "Unexpected mallctl() result");
171 expect_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc,
172 &sz, NULL, 0), expected, "Unexpected mallctl() result");
173 expect_d_eq(mallctl("stats.arenas.0.small.nrequests",
174 (void *)&nrequests, &sz, NULL, 0), expected,
175 "Unexpected mallctl() result");
176
177 if (config_stats) {
178 expect_zu_gt(allocated, 0,
179 "allocated should be greater than zero");
180 expect_u64_gt(nmalloc, 0,
181 "nmalloc should be no greater than zero");
182 expect_u64_ge(nmalloc, ndalloc,
183 "nmalloc should be at least as large as ndalloc");
184 expect_u64_gt(nrequests, 0,
185 "nrequests should be greater than zero");
186 }
187
188 dallocx(p, 0);
189}
190TEST_END
191
/* Same as test_stats_arenas_small, for the arena's large stats. */
TEST_BEGIN(test_stats_arenas_large) {
	void *p;
	size_t sz, allocated;
	uint64_t epoch, nmalloc, ndalloc;
	int expected = config_stats ? 0 : ENOENT;

	p = mallocx((1U << SC_LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
	expect_ptr_not_null(p, "Unexpected mallocx() failure");

	/* Refresh the cached stats. */
	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
	    0, "Unexpected mallctl() failure");

	sz = sizeof(size_t);
	expect_d_eq(mallctl("stats.arenas.0.large.allocated",
	    (void *)&allocated, &sz, NULL, 0), expected,
	    "Unexpected mallctl() result");
	sz = sizeof(uint64_t);
	expect_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
	expect_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
	    &sz, NULL, 0), expected, "Unexpected mallctl() result");

	if (config_stats) {
		expect_zu_gt(allocated, 0,
		    "allocated should be greater than zero");
		expect_u64_gt(nmalloc, 0,
		    "nmalloc should be greater than zero");
		expect_u64_ge(nmalloc, ndalloc,
		    "nmalloc should be at least as large as ndalloc");
	}

	dallocx(p, 0);
}
TEST_END
226
/*
 * Format the mallctl name "stats.arenas.<arena_ind>.bins.0.<name>" into cmd.
 * The caller must supply a buffer large enough for the formatted string; the
 * callers in this file use char[128], which comfortably holds the longest
 * stat name plus a full-width unsigned arena index.  name is const-qualified
 * since it is only read (callers pass string literals).
 */
static void
gen_mallctl_str(char *cmd, const char *name, unsigned arena_ind) {
	sprintf(cmd, "stats.arenas.%u.bins.0.%s", arena_ind, name);
}
231
TEST_BEGIN(test_stats_arenas_bins) {
	void *p;
	size_t sz, curslabs, curregs, nonfull_slabs;
	uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
	uint64_t nslabs, nreslabs;
	/* Stats mallctls only exist when stats support was configured in. */
	int expected = config_stats ? 0 : ENOENT;

	/* Make sure allocation below isn't satisfied by tcache. */
	expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
	    opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");

	/* Use a fresh arena so bin stats start from zero. */
	unsigned arena_ind, old_arena_ind;
	sz = sizeof(unsigned);
	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
	    0, "Arena creation failure");
	sz = sizeof(arena_ind);
	expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
	    (void *)&arena_ind, sizeof(arena_ind)), 0,
	    "Unexpected mallctl() failure");

	/* Allocate from bin 0 of the new arena. */
	p = malloc(bin_infos[0].reg_size);
	expect_ptr_not_null(p, "Unexpected malloc() failure");

	/* Flush again so the allocation above is reflected in fill/flush stats. */
	expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
	    opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");

	/* Bump the epoch to refresh the stats snapshot before reading it. */
	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
	    0, "Unexpected mallctl() failure");

	char cmd[128];
	sz = sizeof(uint64_t);
	gen_mallctl_str(cmd, "nmalloc", arena_ind);
	expect_d_eq(mallctl(cmd, (void *)&nmalloc, &sz, NULL, 0), expected,
	    "Unexpected mallctl() result");
	gen_mallctl_str(cmd, "ndalloc", arena_ind);
	expect_d_eq(mallctl(cmd, (void *)&ndalloc, &sz, NULL, 0), expected,
	    "Unexpected mallctl() result");
	gen_mallctl_str(cmd, "nrequests", arena_ind);
	expect_d_eq(mallctl(cmd, (void *)&nrequests, &sz, NULL, 0), expected,
	    "Unexpected mallctl() result");
	sz = sizeof(size_t);
	gen_mallctl_str(cmd, "curregs", arena_ind);
	expect_d_eq(mallctl(cmd, (void *)&curregs, &sz, NULL, 0), expected,
	    "Unexpected mallctl() result");

	sz = sizeof(uint64_t);
	gen_mallctl_str(cmd, "nfills", arena_ind);
	expect_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected,
	    "Unexpected mallctl() result");
	gen_mallctl_str(cmd, "nflushes", arena_ind);
	expect_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected,
	    "Unexpected mallctl() result");

	gen_mallctl_str(cmd, "nslabs", arena_ind);
	expect_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected,
	    "Unexpected mallctl() result");
	gen_mallctl_str(cmd, "nreslabs", arena_ind);
	expect_d_eq(mallctl(cmd, (void *)&nreslabs, &sz, NULL, 0), expected,
	    "Unexpected mallctl() result");
	sz = sizeof(size_t);
	gen_mallctl_str(cmd, "curslabs", arena_ind);
	expect_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected,
	    "Unexpected mallctl() result");
	gen_mallctl_str(cmd, "nonfull_slabs", arena_ind);
	expect_d_eq(mallctl(cmd, (void *)&nonfull_slabs, &sz, NULL, 0),
	    expected, "Unexpected mallctl() result");

	/* The values read above are only meaningful with stats enabled. */
	if (config_stats) {
		expect_u64_gt(nmalloc, 0,
		    "nmalloc should be greater than zero");
		expect_u64_ge(nmalloc, ndalloc,
		    "nmalloc should be at least as large as ndalloc");
		expect_u64_gt(nrequests, 0,
		    "nrequests should be greater than zero");
		expect_zu_gt(curregs, 0,
		    "allocated should be greater than zero");
		/* Fill/flush counters only move when tcache is in use. */
		if (opt_tcache) {
			expect_u64_gt(nfills, 0,
			    "At least one fill should have occurred");
			expect_u64_gt(nflushes, 0,
			    "At least one flush should have occurred");
		}
		expect_u64_gt(nslabs, 0,
		    "At least one slab should have been allocated");
		expect_zu_gt(curslabs, 0,
		    "At least one slab should be currently allocated");
		expect_zu_eq(nonfull_slabs, 0,
		    "slabs_nonfull should be empty");
	}

	dallocx(p, 0);
}
TEST_END
325
326TEST_BEGIN(test_stats_arenas_lextents) {
327 void *p;
328 uint64_t epoch, nmalloc, ndalloc;
329 size_t curlextents, sz, hsize;
330 int expected = config_stats ? 0 : ENOENT;
331
332 sz = sizeof(size_t);
333 expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL,
334 0), 0, "Unexpected mallctl() failure");
335
336 p = mallocx(hsize, MALLOCX_ARENA(0));
337 expect_ptr_not_null(p, "Unexpected mallocx() failure");
338
339 expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
340 0, "Unexpected mallctl() failure");
341
342 sz = sizeof(uint64_t);
343 expect_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc",
344 (void *)&nmalloc, &sz, NULL, 0), expected,
345 "Unexpected mallctl() result");
346 expect_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc",
347 (void *)&ndalloc, &sz, NULL, 0), expected,
348 "Unexpected mallctl() result");
349 sz = sizeof(size_t);
350 expect_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents",
351 (void *)&curlextents, &sz, NULL, 0), expected,
352 "Unexpected mallctl() result");
353
354 if (config_stats) {
355 expect_u64_gt(nmalloc, 0,
356 "nmalloc should be greater than zero");
357 expect_u64_ge(nmalloc, ndalloc,
358 "nmalloc should be at least as large as ndalloc");
359 expect_u64_gt(curlextents, 0,
360 "At least one extent should be currently allocated");
361 }
362
363 dallocx(p, 0);
364}
365TEST_END
366
367static void
368test_tcache_bytes_for_usize(size_t usize) {
369 uint64_t epoch;
370 size_t tcache_bytes, tcache_stashed_bytes;
371 size_t sz = sizeof(tcache_bytes);
372
373 void *ptr = mallocx(usize, 0);
374
375 expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
376 0, "Unexpected mallctl() failure");
377 assert_d_eq(mallctl(
378 "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
379 &tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
380 assert_d_eq(mallctl(
381 "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
382 ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
383 "Unexpected mallctl failure");
384 size_t tcache_bytes_before = tcache_bytes + tcache_stashed_bytes;
385 dallocx(ptr, 0);
386
387 expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
388 0, "Unexpected mallctl() failure");
389 assert_d_eq(mallctl(
390 "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
391 &tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
392 assert_d_eq(mallctl(
393 "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
394 ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
395 "Unexpected mallctl failure");
396 size_t tcache_bytes_after = tcache_bytes + tcache_stashed_bytes;
397 assert_zu_eq(tcache_bytes_after - tcache_bytes_before,
398 usize, "Incorrectly attributed a free");
399}
400
TEST_BEGIN(test_stats_tcache_bytes_small) {
	/* Requires stats + tcache, and the max small class must be cacheable. */
	test_skip_if(!config_stats);
	test_skip_if(!opt_tcache);
	test_skip_if(opt_tcache_max < SC_SMALL_MAXCLASS);

	test_tcache_bytes_for_usize(SC_SMALL_MAXCLASS);
}
TEST_END
409
TEST_BEGIN(test_stats_tcache_bytes_large) {
	/* Requires stats + tcache, and the min large class must be cacheable. */
	test_skip_if(!config_stats);
	test_skip_if(!opt_tcache);
	test_skip_if(opt_tcache_max < SC_LARGE_MINCLASS);

	test_tcache_bytes_for_usize(SC_LARGE_MINCLASS);
}
TEST_END
418
int
main(void) {
	/* Run all stats tests through the no-reentrancy harness. */
	return test_no_reentrancy(
	    test_stats_summary,
	    test_stats_large,
	    test_stats_arenas_summary,
	    test_stats_arenas_small,
	    test_stats_arenas_large,
	    test_stats_arenas_bins,
	    test_stats_arenas_lextents,
	    test_stats_tcache_bytes_small,
	    test_stats_tcache_bytes_large);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/stats_print.c b/examples/redis-unstable/deps/jemalloc/test/unit/stats_print.c
deleted file mode 100644
index 3b31775..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/stats_print.c
+++ /dev/null
@@ -1,999 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/util.h"
4
/* Token kinds produced by the JSON tokenizer below. */
typedef enum {
	TOKEN_TYPE_NONE,	/* No token has been scanned yet. */
	TOKEN_TYPE_ERROR,	/* Tokenization error. */
	TOKEN_TYPE_EOI,		/* End of input ('\0'). */
	TOKEN_TYPE_NULL,	/* The literal "null". */
	TOKEN_TYPE_FALSE,	/* The literal "false". */
	TOKEN_TYPE_TRUE,	/* The literal "true". */
	TOKEN_TYPE_LBRACKET,	/* '[' */
	TOKEN_TYPE_RBRACKET,	/* ']' */
	TOKEN_TYPE_LBRACE,	/* '{' */
	TOKEN_TYPE_RBRACE,	/* '}' */
	TOKEN_TYPE_COLON,	/* ':' */
	TOKEN_TYPE_COMMA,	/* ',' */
	TOKEN_TYPE_STRING,	/* Double-quoted string. */
	TOKEN_TYPE_NUMBER	/* JSON number (int/frac/exp). */
} token_type_t;
21
typedef struct parser_s parser_t;
/* A scanned token: its type plus its location within the parser's buffer. */
typedef struct {
	parser_t *parser;	/* Parser that produced this token. */
	token_type_t token_type;
	size_t pos;		/* Byte offset of the token in parser->buf. */
	size_t len;		/* Token length in bytes. */
	size_t line;		/* 1-based line of the token's first byte. */
	size_t col;		/* 0-based column of the token's first byte. */
} token_t;

/* JSON parser over an in-memory, '\0'-terminated buffer. */
struct parser_s {
	bool verbose;	/* Print diagnostics on error. */
	char *buf; /* '\0'-terminated. */
	size_t len; /* Number of characters preceding '\0' in buf. */
	size_t pos;	/* Current scan offset. */
	size_t line;	/* Current line (1-based). */
	size_t col;	/* Current column (0-based). */
	token_t token;	/* Most recently scanned token. */
};
41
42static void
43token_init(token_t *token, parser_t *parser, token_type_t token_type,
44 size_t pos, size_t len, size_t line, size_t col) {
45 token->parser = parser;
46 token->token_type = token_type;
47 token->pos = pos;
48 token->len = len;
49 token->line = line;
50 token->col = col;
51}
52
/*
 * Print a diagnostic for an unexpected token (or tokenization error) and echo
 * the offending input bytes.  Silent unless the parser is verbose.
 */
static void
token_error(token_t *token) {
	if (!token->parser->verbose) {
		return;
	}
	switch (token->token_type) {
	case TOKEN_TYPE_NONE:
		/* No token should ever be reported before one is scanned. */
		not_reached();
	case TOKEN_TYPE_ERROR:
		malloc_printf("%zu:%zu: Unexpected character in token: ",
		    token->line, token->col);
		break;
	default:
		malloc_printf("%zu:%zu: Unexpected token: ", token->line,
		    token->col);
		break;
	}
	/* Echo the raw token bytes straight from the parse buffer. */
	UNUSED ssize_t err = malloc_write_fd(STDERR_FILENO,
	    &token->parser->buf[token->pos], token->len);
	malloc_printf("\n");
}
74
75static void
76parser_init(parser_t *parser, bool verbose) {
77 parser->verbose = verbose;
78 parser->buf = NULL;
79 parser->len = 0;
80 parser->pos = 0;
81 parser->line = 1;
82 parser->col = 0;
83}
84
85static void
86parser_fini(parser_t *parser) {
87 if (parser->buf != NULL) {
88 dallocx(parser->buf, MALLOCX_TCACHE_NONE);
89 }
90}
91
/*
 * Append the '\0'-terminated string str to the parser's buffer, growing it as
 * needed.  Returns true on allocation failure; the existing buffer remains
 * valid in that case.
 */
static bool
parser_append(parser_t *parser, const char *str) {
	size_t len = strlen(str);
	/* First append allocates; subsequent appends grow via rallocx. */
	char *buf = (parser->buf == NULL) ? mallocx(len + 1,
	    MALLOCX_TCACHE_NONE) : rallocx(parser->buf, parser->len + len + 1,
	    MALLOCX_TCACHE_NONE);
	if (buf == NULL) {
		return true;
	}
	/* len + 1 so the terminating '\0' is copied as well. */
	memcpy(&buf[parser->len], str, len + 1);
	parser->buf = buf;
	parser->len += len;
	return false;
}
106
/*
 * Scan the next JSON token starting at parser->pos and record it in
 * parser->token.  Returns true on tokenization error (an ERROR token is
 * recorded), false otherwise.  Implemented as a character-at-a-time state
 * machine; states named after literal prefixes (STATE_NU, STATE_FALS, ...)
 * track progress through "null"/"false"/"true", and the STATE_CHAR_*
 * states validate string contents including \uXXXX escapes.
 */
static bool
parser_tokenize(parser_t *parser) {
	enum {
		STATE_START,
		STATE_EOI,
		STATE_N, STATE_NU, STATE_NUL, STATE_NULL,
		STATE_F, STATE_FA, STATE_FAL, STATE_FALS, STATE_FALSE,
		STATE_T, STATE_TR, STATE_TRU, STATE_TRUE,
		STATE_LBRACKET,
		STATE_RBRACKET,
		STATE_LBRACE,
		STATE_RBRACE,
		STATE_COLON,
		STATE_COMMA,
		STATE_CHARS,
		STATE_CHAR_ESCAPE,
		STATE_CHAR_U, STATE_CHAR_UD, STATE_CHAR_UDD, STATE_CHAR_UDDD,
		STATE_STRING,
		STATE_MINUS,
		STATE_LEADING_ZERO,
		STATE_DIGITS,
		STATE_DECIMAL,
		STATE_FRAC_DIGITS,
		STATE_EXP,
		STATE_EXP_SIGN,
		STATE_EXP_DIGITS,
		STATE_ACCEPT
	} state = STATE_START;
	size_t token_pos JEMALLOC_CC_SILENCE_INIT(0);
	size_t token_line JEMALLOC_CC_SILENCE_INIT(1);
	size_t token_col JEMALLOC_CC_SILENCE_INIT(0);

	expect_zu_le(parser->pos, parser->len,
	    "Position is past end of buffer");

	/* Consume characters until a token is accepted or rejected. */
	while (state != STATE_ACCEPT) {
		char c = parser->buf[parser->pos];

		switch (state) {
		case STATE_START:
			/* Remember where the token begins for error spans. */
			token_pos = parser->pos;
			token_line = parser->line;
			token_col = parser->col;
			switch (c) {
			case ' ': case '\b': case '\n': case '\r': case '\t':
				/* Skip inter-token whitespace. */
				break;
			case '\0':
				state = STATE_EOI;
				break;
			case 'n':
				state = STATE_N;
				break;
			case 'f':
				state = STATE_F;
				break;
			case 't':
				state = STATE_T;
				break;
			case '[':
				state = STATE_LBRACKET;
				break;
			case ']':
				state = STATE_RBRACKET;
				break;
			case '{':
				state = STATE_LBRACE;
				break;
			case '}':
				state = STATE_RBRACE;
				break;
			case ':':
				state = STATE_COLON;
				break;
			case ',':
				state = STATE_COMMA;
				break;
			case '"':
				state = STATE_CHARS;
				break;
			case '-':
				state = STATE_MINUS;
				break;
			case '0':
				state = STATE_LEADING_ZERO;
				break;
			case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
				state = STATE_DIGITS;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_EOI:
			token_init(&parser->token, parser,
			    TOKEN_TYPE_EOI, token_pos, parser->pos -
			    token_pos, token_line, token_col);
			state = STATE_ACCEPT;
			break;
		case STATE_N:
			switch (c) {
			case 'u':
				state = STATE_NU;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_NU:
			switch (c) {
			case 'l':
				state = STATE_NUL;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_NUL:
			switch (c) {
			case 'l':
				state = STATE_NULL;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_NULL:
			/* "null" must be followed by a delimiter. */
			switch (c) {
			case ' ': case '\b': case '\n': case '\r': case '\t':
			case '\0':
			case '[': case ']': case '{': case '}': case ':':
			case ',':
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			token_init(&parser->token, parser, TOKEN_TYPE_NULL,
			    token_pos, parser->pos - token_pos, token_line,
			    token_col);
			state = STATE_ACCEPT;
			break;
		case STATE_F:
			switch (c) {
			case 'a':
				state = STATE_FA;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_FA:
			switch (c) {
			case 'l':
				state = STATE_FAL;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_FAL:
			switch (c) {
			case 's':
				state = STATE_FALS;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_FALS:
			switch (c) {
			case 'e':
				state = STATE_FALSE;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_FALSE:
			/* "false" must be followed by a delimiter. */
			switch (c) {
			case ' ': case '\b': case '\n': case '\r': case '\t':
			case '\0':
			case '[': case ']': case '{': case '}': case ':':
			case ',':
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			token_init(&parser->token, parser,
			    TOKEN_TYPE_FALSE, token_pos, parser->pos -
			    token_pos, token_line, token_col);
			state = STATE_ACCEPT;
			break;
		case STATE_T:
			switch (c) {
			case 'r':
				state = STATE_TR;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_TR:
			switch (c) {
			case 'u':
				state = STATE_TRU;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_TRU:
			switch (c) {
			case 'e':
				state = STATE_TRUE;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_TRUE:
			/* "true" must be followed by a delimiter. */
			switch (c) {
			case ' ': case '\b': case '\n': case '\r': case '\t':
			case '\0':
			case '[': case ']': case '{': case '}': case ':':
			case ',':
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			token_init(&parser->token, parser, TOKEN_TYPE_TRUE,
			    token_pos, parser->pos - token_pos, token_line,
			    token_col);
			state = STATE_ACCEPT;
			break;
		case STATE_LBRACKET:
			token_init(&parser->token, parser, TOKEN_TYPE_LBRACKET,
			    token_pos, parser->pos - token_pos, token_line,
			    token_col);
			state = STATE_ACCEPT;
			break;
		case STATE_RBRACKET:
			token_init(&parser->token, parser, TOKEN_TYPE_RBRACKET,
			    token_pos, parser->pos - token_pos, token_line,
			    token_col);
			state = STATE_ACCEPT;
			break;
		case STATE_LBRACE:
			token_init(&parser->token, parser, TOKEN_TYPE_LBRACE,
			    token_pos, parser->pos - token_pos, token_line,
			    token_col);
			state = STATE_ACCEPT;
			break;
		case STATE_RBRACE:
			token_init(&parser->token, parser, TOKEN_TYPE_RBRACE,
			    token_pos, parser->pos - token_pos, token_line,
			    token_col);
			state = STATE_ACCEPT;
			break;
		case STATE_COLON:
			token_init(&parser->token, parser, TOKEN_TYPE_COLON,
			    token_pos, parser->pos - token_pos, token_line,
			    token_col);
			state = STATE_ACCEPT;
			break;
		case STATE_COMMA:
			token_init(&parser->token, parser, TOKEN_TYPE_COMMA,
			    token_pos, parser->pos - token_pos, token_line,
			    token_col);
			state = STATE_ACCEPT;
			break;
		case STATE_CHARS:
			/* Inside a string; control characters are invalid. */
			switch (c) {
			case '\\':
				state = STATE_CHAR_ESCAPE;
				break;
			case '"':
				state = STATE_STRING;
				break;
			case 0x00: case 0x01: case 0x02: case 0x03: case 0x04:
			case 0x05: case 0x06: case 0x07: case 0x08: case 0x09:
			case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e:
			case 0x0f: case 0x10: case 0x11: case 0x12: case 0x13:
			case 0x14: case 0x15: case 0x16: case 0x17: case 0x18:
			case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d:
			case 0x1e: case 0x1f:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			default:
				break;
			}
			break;
		case STATE_CHAR_ESCAPE:
			switch (c) {
			case '"': case '\\': case '/': case 'b': case 'n':
			case 'r': case 't':
				state = STATE_CHARS;
				break;
			case 'u':
				/* \uXXXX: expect four hex digits next. */
				state = STATE_CHAR_U;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_CHAR_U:
			switch (c) {
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
			case 'a': case 'b': case 'c': case 'd': case 'e':
			case 'f':
			case 'A': case 'B': case 'C': case 'D': case 'E':
			case 'F':
				state = STATE_CHAR_UD;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_CHAR_UD:
			switch (c) {
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
			case 'a': case 'b': case 'c': case 'd': case 'e':
			case 'f':
			case 'A': case 'B': case 'C': case 'D': case 'E':
			case 'F':
				state = STATE_CHAR_UDD;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_CHAR_UDD:
			switch (c) {
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
			case 'a': case 'b': case 'c': case 'd': case 'e':
			case 'f':
			case 'A': case 'B': case 'C': case 'D': case 'E':
			case 'F':
				state = STATE_CHAR_UDDD;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_CHAR_UDDD:
			switch (c) {
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
			case 'a': case 'b': case 'c': case 'd': case 'e':
			case 'f':
			case 'A': case 'B': case 'C': case 'D': case 'E':
			case 'F':
				state = STATE_CHARS;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_STRING:
			token_init(&parser->token, parser, TOKEN_TYPE_STRING,
			    token_pos, parser->pos - token_pos, token_line,
			    token_col);
			state = STATE_ACCEPT;
			break;
		case STATE_MINUS:
			switch (c) {
			case '0':
				state = STATE_LEADING_ZERO;
				break;
			case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
				state = STATE_DIGITS;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_LEADING_ZERO:
			/* A leading zero may only be followed by a fraction. */
			switch (c) {
			case '.':
				state = STATE_DECIMAL;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_NUMBER, token_pos, parser->pos -
				    token_pos, token_line, token_col);
				state = STATE_ACCEPT;
				break;
			}
			break;
		case STATE_DIGITS:
			switch (c) {
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
				break;
			case '.':
				state = STATE_DECIMAL;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_NUMBER, token_pos, parser->pos -
				    token_pos, token_line, token_col);
				state = STATE_ACCEPT;
				break;
			}
			break;
		case STATE_DECIMAL:
			/* At least one digit is required after '.'. */
			switch (c) {
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
				state = STATE_FRAC_DIGITS;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_FRAC_DIGITS:
			switch (c) {
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
				break;
			case 'e': case 'E':
				state = STATE_EXP;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_NUMBER, token_pos, parser->pos -
				    token_pos, token_line, token_col);
				state = STATE_ACCEPT;
				break;
			}
			break;
		case STATE_EXP:
			switch (c) {
			case '-': case '+':
				state = STATE_EXP_SIGN;
				break;
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
				state = STATE_EXP_DIGITS;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_EXP_SIGN:
			/* At least one digit is required after the sign. */
			switch (c) {
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
				state = STATE_EXP_DIGITS;
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
				    - token_pos, token_line, token_col);
				return true;
			}
			break;
		case STATE_EXP_DIGITS:
			switch (c) {
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
				break;
			default:
				token_init(&parser->token, parser,
				    TOKEN_TYPE_NUMBER, token_pos, parser->pos -
				    token_pos, token_line, token_col);
				state = STATE_ACCEPT;
				break;
			}
			break;
		default:
			not_reached();
		}

		/*
		 * The terminating character of an accepted token is left
		 * unconsumed so the next scan starts on it.
		 */
		if (state != STATE_ACCEPT) {
			if (c == '\n') {
				parser->line++;
				parser->col = 0;
			} else {
				parser->col++;
			}
			parser->pos++;
		}
	}
	return false;
}
664
665static bool parser_parse_array(parser_t *parser);
666static bool parser_parse_object(parser_t *parser);
667
668static bool
669parser_parse_value(parser_t *parser) {
670 switch (parser->token.token_type) {
671 case TOKEN_TYPE_NULL:
672 case TOKEN_TYPE_FALSE:
673 case TOKEN_TYPE_TRUE:
674 case TOKEN_TYPE_STRING:
675 case TOKEN_TYPE_NUMBER:
676 return false;
677 case TOKEN_TYPE_LBRACE:
678 return parser_parse_object(parser);
679 case TOKEN_TYPE_LBRACKET:
680 return parser_parse_array(parser);
681 default:
682 return true;
683 }
684 not_reached();
685}
686
687static bool
688parser_parse_pair(parser_t *parser) {
689 expect_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
690 "Pair should start with string");
691 if (parser_tokenize(parser)) {
692 return true;
693 }
694 switch (parser->token.token_type) {
695 case TOKEN_TYPE_COLON:
696 if (parser_tokenize(parser)) {
697 return true;
698 }
699 return parser_parse_value(parser);
700 default:
701 return true;
702 }
703}
704
705static bool
706parser_parse_values(parser_t *parser) {
707 if (parser_parse_value(parser)) {
708 return true;
709 }
710
711 while (true) {
712 if (parser_tokenize(parser)) {
713 return true;
714 }
715 switch (parser->token.token_type) {
716 case TOKEN_TYPE_COMMA:
717 if (parser_tokenize(parser)) {
718 return true;
719 }
720 if (parser_parse_value(parser)) {
721 return true;
722 }
723 break;
724 case TOKEN_TYPE_RBRACKET:
725 return false;
726 default:
727 return true;
728 }
729 }
730}
731
732static bool
733parser_parse_array(parser_t *parser) {
734 expect_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET,
735 "Array should start with [");
736 if (parser_tokenize(parser)) {
737 return true;
738 }
739 switch (parser->token.token_type) {
740 case TOKEN_TYPE_RBRACKET:
741 return false;
742 default:
743 return parser_parse_values(parser);
744 }
745 not_reached();
746}
747
748static bool
749parser_parse_pairs(parser_t *parser) {
750 expect_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
751 "Object should start with string");
752 if (parser_parse_pair(parser)) {
753 return true;
754 }
755
756 while (true) {
757 if (parser_tokenize(parser)) {
758 return true;
759 }
760 switch (parser->token.token_type) {
761 case TOKEN_TYPE_COMMA:
762 if (parser_tokenize(parser)) {
763 return true;
764 }
765 switch (parser->token.token_type) {
766 case TOKEN_TYPE_STRING:
767 if (parser_parse_pair(parser)) {
768 return true;
769 }
770 break;
771 default:
772 return true;
773 }
774 break;
775 case TOKEN_TYPE_RBRACE:
776 return false;
777 default:
778 return true;
779 }
780 }
781}
782
783static bool
784parser_parse_object(parser_t *parser) {
785 expect_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE,
786 "Object should start with {");
787 if (parser_tokenize(parser)) {
788 return true;
789 }
790 switch (parser->token.token_type) {
791 case TOKEN_TYPE_STRING:
792 return parser_parse_pairs(parser);
793 case TOKEN_TYPE_RBRACE:
794 return false;
795 default:
796 return true;
797 }
798 not_reached();
799}
800
801static bool
802parser_parse(parser_t *parser) {
803 if (parser_tokenize(parser)) {
804 goto label_error;
805 }
806 if (parser_parse_value(parser)) {
807 goto label_error;
808 }
809
810 if (parser_tokenize(parser)) {
811 goto label_error;
812 }
813 switch (parser->token.token_type) {
814 case TOKEN_TYPE_EOI:
815 return false;
816 default:
817 goto label_error;
818 }
819 not_reached();
820
821label_error:
822 token_error(&parser->token);
823 return true;
824}
825
TEST_BEGIN(test_json_parser) {
	size_t i;
	const char *invalid_inputs[] = {
		/* Tokenizer error case tests. */
		"{ \"string\": X }",
		"{ \"string\": nXll }",
		"{ \"string\": nuXl }",
		"{ \"string\": nulX }",
		"{ \"string\": nullX }",
		"{ \"string\": fXlse }",
		"{ \"string\": faXse }",
		"{ \"string\": falXe }",
		"{ \"string\": falsX }",
		"{ \"string\": falseX }",
		"{ \"string\": tXue }",
		"{ \"string\": trXe }",
		"{ \"string\": truX }",
		"{ \"string\": trueX }",
		"{ \"string\": \"\n\" }",
		"{ \"string\": \"\\z\" }",
		"{ \"string\": \"\\uX000\" }",
		"{ \"string\": \"\\u0X00\" }",
		"{ \"string\": \"\\u00X0\" }",
		"{ \"string\": \"\\u000X\" }",
		"{ \"string\": -X }",
		"{ \"string\": 0.X }",
		"{ \"string\": 0.0eX }",
		"{ \"string\": 0.0e+X }",

		/* Parser error test cases. */
		"{\"string\": }",
		"{\"string\" }",
		"{\"string\": [ 0 }",
		"{\"string\": {\"a\":0, 1 } }",
		"{\"string\": {\"a\":0: } }",
		"{",
		"{}{",
	};
	const char *valid_inputs[] = {
		/* Token tests. */
		"null",
		"false",
		"true",
		"{}",
		"{\"a\": 0}",
		"[]",
		"[0, 1]",
		"0",
		"1",
		"10",
		"-10",
		"10.23",
		"10.23e4",
		"10.23e-4",
		"10.23e+4",
		"10.23E4",
		"10.23E-4",
		"10.23E+4",
		"-10.23",
		"-10.23e4",
		"-10.23e-4",
		"-10.23e+4",
		"-10.23E4",
		"-10.23E-4",
		"-10.23E+4",
		"\"value\"",
		"\" \\\" \\/ \\b \\n \\r \\t \\u0abc \\u1DEF \"",

		/* Parser test with various nesting. */
		"{\"a\":null, \"b\":[1,[{\"c\":2},3]], \"d\":{\"e\":true}}",
	};

	/* Non-verbose parsers: these inputs are expected to be rejected. */
	for (i = 0; i < sizeof(invalid_inputs)/sizeof(const char *); i++) {
		const char *input = invalid_inputs[i];
		parser_t parser;
		parser_init(&parser, false);
		expect_false(parser_append(&parser, input),
		    "Unexpected input appending failure");
		expect_true(parser_parse(&parser),
		    "Unexpected parse success for input: %s", input);
		parser_fini(&parser);
	}

	/* Verbose parsers: any rejection here prints a diagnostic. */
	for (i = 0; i < sizeof(valid_inputs)/sizeof(const char *); i++) {
		const char *input = valid_inputs[i];
		parser_t parser;
		parser_init(&parser, true);
		expect_false(parser_append(&parser, input),
		    "Unexpected input appending failure");
		expect_false(parser_parse(&parser),
		    "Unexpected parse error for input: %s", input);
		parser_fini(&parser);
	}
}
TEST_END
921
922void
923write_cb(void *opaque, const char *str) {
924 parser_t *parser = (parser_t *)opaque;
925 if (parser_append(parser, str)) {
926 test_fail("Unexpected input appending failure");
927 }
928}
929
TEST_BEGIN(test_stats_print_json) {
	/* Option strings exercising combinations of malloc_stats_print flags. */
	const char *opts[] = {
		"J",
		"Jg",
		"Jm",
		"Jd",
		"Jmd",
		"Jgd",
		"Jgm",
		"Jgmd",
		"Ja",
		"Jb",
		"Jl",
		"Jx",
		"Jbl",
		"Jal",
		"Jab",
		"Jabl",
		"Jax",
		"Jbx",
		"Jlx",
		"Jablx",
		"Jgmdablx",
	};
	unsigned arena_ind, i;

	/*
	 * Three passes: i==0 default state, i==1 after creating an arena,
	 * i==2 after destroying it — JSON output must stay valid throughout.
	 */
	for (i = 0; i < 3; i++) {
		unsigned j;

		switch (i) {
		case 0:
			break;
		case 1: {
			size_t sz = sizeof(arena_ind);
			expect_d_eq(mallctl("arenas.create", (void *)&arena_ind,
			    &sz, NULL, 0), 0, "Unexpected mallctl failure");
			break;
		} case 2: {
			size_t mib[3];
			size_t miblen = sizeof(mib)/sizeof(size_t);
			expect_d_eq(mallctlnametomib("arena.0.destroy",
			    mib, &miblen), 0,
			    "Unexpected mallctlnametomib failure");
			mib[1] = arena_ind;
			expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL,
			    0), 0, "Unexpected mallctlbymib failure");
			break;
		} default:
			not_reached();
		}

		/* Each option string must produce parseable JSON. */
		for (j = 0; j < sizeof(opts)/sizeof(const char *); j++) {
			parser_t parser;

			parser_init(&parser, true);
			malloc_stats_print(write_cb, (void *)&parser, opts[j]);
			expect_false(parser_parse(&parser),
			    "Unexpected parse error, opts=\"%s\"", opts[j]);
			parser_fini(&parser);
		}
	}
}
TEST_END
993
int
main(void) {
	/* Run the JSON parser self-test, then validate stats JSON output. */
	return test(
	    test_json_parser,
	    test_stats_print_json);
}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/sz.c b/examples/redis-unstable/deps/jemalloc/test/unit/sz.c
deleted file mode 100644
index 8ae04b9..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/sz.c
+++ /dev/null
@@ -1,66 +0,0 @@
1#include "test/jemalloc_test.h"
2
3TEST_BEGIN(test_sz_psz2ind) {
4 /*
5 * Testing page size classes which reside prior to the regular group
6 * with all size classes divisible by page size.
7 * For x86_64 Linux, it's 4096, 8192, 12288, 16384, with corresponding
8 * pszind 0, 1, 2 and 3.
9 */
10 for (size_t i = 0; i < SC_NGROUP; i++) {
11 for (size_t psz = i * PAGE + 1; psz <= (i + 1) * PAGE; psz++) {
12 pszind_t ind = sz_psz2ind(psz);
13 expect_zu_eq(ind, i, "Got %u as sz_psz2ind of %zu", ind,
14 psz);
15 }
16 }
17
18 sc_data_t data;
19 memset(&data, 0, sizeof(data));
20 sc_data_init(&data);
21 /*
22 * 'base' is the base of the first regular group with all size classes
23 * divisible by page size.
24 * For x86_64 Linux, it's 16384, and base_ind is 36.
25 */
26 size_t base_psz = 1 << (SC_LG_NGROUP + LG_PAGE);
27 size_t base_ind = 0;
28 while (base_ind < SC_NSIZES &&
29 reg_size_compute(data.sc[base_ind].lg_base,
30 data.sc[base_ind].lg_delta,
31 data.sc[base_ind].ndelta) < base_psz) {
32 base_ind++;
33 }
34 expect_zu_eq(
35 reg_size_compute(data.sc[base_ind].lg_base,
36 data.sc[base_ind].lg_delta, data.sc[base_ind].ndelta),
37 base_psz, "Size class equal to %zu not found", base_psz);
38 /*
39 * Test different sizes falling into groups after the 'base'. The
40 * increment is PAGE / 3 for the execution speed purpose.
41 */
42 base_ind -= SC_NGROUP;
43 for (size_t psz = base_psz; psz <= 64 * 1024 * 1024; psz += PAGE / 3) {
44 pszind_t ind = sz_psz2ind(psz);
45 sc_t gt_sc = data.sc[ind + base_ind];
46 expect_zu_gt(psz,
47 reg_size_compute(gt_sc.lg_base, gt_sc.lg_delta,
48 gt_sc.ndelta),
49 "Got %u as sz_psz2ind of %zu", ind, psz);
50 sc_t le_sc = data.sc[ind + base_ind + 1];
51 expect_zu_le(psz,
52 reg_size_compute(le_sc.lg_base, le_sc.lg_delta,
53 le_sc.ndelta),
54 "Got %u as sz_psz2ind of %zu", ind, psz);
55 }
56
57 pszind_t max_ind = sz_psz2ind(SC_LARGE_MAXCLASS + 1);
58 expect_lu_eq(max_ind, SC_NPSIZES,
59 "Got %u as sz_psz2ind of %llu", max_ind, SC_LARGE_MAXCLASS);
60}
61TEST_END
62
63int
64main(void) {
65 return test(test_sz_psz2ind);
66}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/tcache_max.c b/examples/redis-unstable/deps/jemalloc/test/unit/tcache_max.c
deleted file mode 100644
index 1f657c8..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/tcache_max.c
+++ /dev/null
@@ -1,175 +0,0 @@
1#include "test/jemalloc_test.h"
2#include "test/san.h"
3
4const char *malloc_conf = TEST_SAN_UAF_ALIGN_DISABLE;
5
6enum {
7 alloc_option_start = 0,
8 use_malloc = 0,
9 use_mallocx,
10 alloc_option_end
11};
12
13enum {
14 dalloc_option_start = 0,
15 use_free = 0,
16 use_dallocx,
17 use_sdallocx,
18 dalloc_option_end
19};
20
21static unsigned alloc_option, dalloc_option;
22static size_t tcache_max;
23
24static void *
25alloc_func(size_t sz) {
26 void *ret;
27
28 switch (alloc_option) {
29 case use_malloc:
30 ret = malloc(sz);
31 break;
32 case use_mallocx:
33 ret = mallocx(sz, 0);
34 break;
35 default:
36 unreachable();
37 }
38 expect_ptr_not_null(ret, "Unexpected malloc / mallocx failure");
39
40 return ret;
41}
42
43static void
44dalloc_func(void *ptr, size_t sz) {
45 switch (dalloc_option) {
46 case use_free:
47 free(ptr);
48 break;
49 case use_dallocx:
50 dallocx(ptr, 0);
51 break;
52 case use_sdallocx:
53 sdallocx(ptr, sz, 0);
54 break;
55 default:
56 unreachable();
57 }
58}
59
60static size_t
61tcache_bytes_read(void) {
62 uint64_t epoch;
63 assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
64 0, "Unexpected mallctl() failure");
65
66 size_t tcache_bytes;
67 size_t sz = sizeof(tcache_bytes);
68 assert_d_eq(mallctl(
69 "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
70 &tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
71
72 return tcache_bytes;
73}
74
75static void
76tcache_bytes_check_update(size_t *prev, ssize_t diff) {
77 size_t tcache_bytes = tcache_bytes_read();
78 expect_zu_eq(tcache_bytes, *prev + diff, "tcache bytes not expected");
79
80 *prev += diff;
81}
82
83static void
84test_tcache_bytes_alloc(size_t alloc_size) {
85 expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), 0,
86 "Unexpected tcache flush failure");
87
88 size_t usize = sz_s2u(alloc_size);
89 /* No change is expected if usize is outside of tcache_max range. */
90 bool cached = (usize <= tcache_max);
91 ssize_t diff = cached ? usize : 0;
92
93 void *ptr1 = alloc_func(alloc_size);
94 void *ptr2 = alloc_func(alloc_size);
95
96 size_t bytes = tcache_bytes_read();
97 dalloc_func(ptr2, alloc_size);
98 /* Expect tcache_bytes increase after dalloc */
99 tcache_bytes_check_update(&bytes, diff);
100
101 dalloc_func(ptr1, alloc_size);
102 /* Expect tcache_bytes increase again */
103 tcache_bytes_check_update(&bytes, diff);
104
105 void *ptr3 = alloc_func(alloc_size);
106 if (cached) {
107 expect_ptr_eq(ptr1, ptr3, "Unexpected cached ptr");
108 }
109 /* Expect tcache_bytes decrease after alloc */
110 tcache_bytes_check_update(&bytes, -diff);
111
112 void *ptr4 = alloc_func(alloc_size);
113 if (cached) {
114 expect_ptr_eq(ptr2, ptr4, "Unexpected cached ptr");
115 }
116 /* Expect tcache_bytes decrease again */
117 tcache_bytes_check_update(&bytes, -diff);
118
119 dalloc_func(ptr3, alloc_size);
120 tcache_bytes_check_update(&bytes, diff);
121 dalloc_func(ptr4, alloc_size);
122 tcache_bytes_check_update(&bytes, diff);
123}
124
125static void
126test_tcache_max_impl(void) {
127 size_t sz;
128 sz = sizeof(tcache_max);
129 assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
130 &sz, NULL, 0), 0, "Unexpected mallctl() failure");
131
132 /* opt.tcache_max set to 1024 in tcache_max.sh */
133 expect_zu_eq(tcache_max, 1024, "tcache_max not expected");
134
135 test_tcache_bytes_alloc(1);
136 test_tcache_bytes_alloc(tcache_max - 1);
137 test_tcache_bytes_alloc(tcache_max);
138 test_tcache_bytes_alloc(tcache_max + 1);
139
140 test_tcache_bytes_alloc(PAGE - 1);
141 test_tcache_bytes_alloc(PAGE);
142 test_tcache_bytes_alloc(PAGE + 1);
143
144 size_t large;
145 sz = sizeof(large);
146 assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL,
147 0), 0, "Unexpected mallctl() failure");
148
149 test_tcache_bytes_alloc(large - 1);
150 test_tcache_bytes_alloc(large);
151 test_tcache_bytes_alloc(large + 1);
152}
153
154TEST_BEGIN(test_tcache_max) {
155 test_skip_if(!config_stats);
156 test_skip_if(!opt_tcache);
157 test_skip_if(opt_prof);
158 test_skip_if(san_uaf_detection_enabled());
159
160 for (alloc_option = alloc_option_start;
161 alloc_option < alloc_option_end;
162 alloc_option++) {
163 for (dalloc_option = dalloc_option_start;
164 dalloc_option < dalloc_option_end;
165 dalloc_option++) {
166 test_tcache_max_impl();
167 }
168 }
169}
170TEST_END
171
172int
173main(void) {
174 return test(test_tcache_max);
175}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/tcache_max.sh b/examples/redis-unstable/deps/jemalloc/test/unit/tcache_max.sh
deleted file mode 100644
index 4480d73..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/tcache_max.sh
+++ /dev/null
@@ -1,3 +0,0 @@
1#!/bin/sh
2
3export MALLOC_CONF="tcache_max:1024"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/test_hooks.c b/examples/redis-unstable/deps/jemalloc/test/unit/test_hooks.c
deleted file mode 100644
index 8cd2b3b..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/test_hooks.c
+++ /dev/null
@@ -1,38 +0,0 @@
1#include "test/jemalloc_test.h"
2
3static bool hook_called = false;
4
5static void
6hook() {
7 hook_called = true;
8}
9
10static int
11func_to_hook(int arg1, int arg2) {
12 return arg1 + arg2;
13}
14
15#define func_to_hook JEMALLOC_TEST_HOOK(func_to_hook, test_hooks_libc_hook)
16
17TEST_BEGIN(unhooked_call) {
18 test_hooks_libc_hook = NULL;
19 hook_called = false;
20 expect_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
21 expect_false(hook_called, "Nulling out hook didn't take.");
22}
23TEST_END
24
25TEST_BEGIN(hooked_call) {
26 test_hooks_libc_hook = &hook;
27 hook_called = false;
28 expect_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
29 expect_true(hook_called, "Hook should have executed.");
30}
31TEST_END
32
33int
34main(void) {
35 return test(
36 unhooked_call,
37 hooked_call);
38}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/thread_event.c b/examples/redis-unstable/deps/jemalloc/test/unit/thread_event.c
deleted file mode 100644
index e0b88a9..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/thread_event.c
+++ /dev/null
@@ -1,34 +0,0 @@
1#include "test/jemalloc_test.h"
2
3TEST_BEGIN(test_next_event_fast) {
4 tsd_t *tsd = tsd_fetch();
5 te_ctx_t ctx;
6 te_ctx_get(tsd, &ctx, true);
7
8 te_ctx_last_event_set(&ctx, 0);
9 te_ctx_current_bytes_set(&ctx, TE_NEXT_EVENT_FAST_MAX - 8U);
10 te_ctx_next_event_set(tsd, &ctx, TE_NEXT_EVENT_FAST_MAX);
11#define E(event, condition, is_alloc) \
12 if (is_alloc && condition) { \
13 event##_event_wait_set(tsd, TE_NEXT_EVENT_FAST_MAX); \
14 }
15 ITERATE_OVER_ALL_EVENTS
16#undef E
17
18 /* Test next_event_fast rolling back to 0. */
19 void *p = malloc(16U);
20 assert_ptr_not_null(p, "malloc() failed");
21 free(p);
22
23 /* Test next_event_fast resuming to be equal to next_event. */
24 void *q = malloc(SC_LOOKUP_MAXCLASS);
25 assert_ptr_not_null(q, "malloc() failed");
26 free(q);
27}
28TEST_END
29
30int
31main(void) {
32 return test(
33 test_next_event_fast);
34}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/thread_event.sh b/examples/redis-unstable/deps/jemalloc/test/unit/thread_event.sh
deleted file mode 100644
index 8fcc7d8..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/thread_event.sh
+++ /dev/null
@@ -1,5 +0,0 @@
1#!/bin/sh
2
3if [ "x${enable_prof}" = "x1" ] ; then
4 export MALLOC_CONF="prof:true,lg_prof_sample:0"
5fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/ticker.c b/examples/redis-unstable/deps/jemalloc/test/unit/ticker.c
deleted file mode 100644
index 0dd7786..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/ticker.c
+++ /dev/null
@@ -1,100 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/ticker.h"
4
5TEST_BEGIN(test_ticker_tick) {
6#define NREPS 2
7#define NTICKS 3
8 ticker_t ticker;
9 int32_t i, j;
10
11 ticker_init(&ticker, NTICKS);
12 for (i = 0; i < NREPS; i++) {
13 for (j = 0; j < NTICKS; j++) {
14 expect_u_eq(ticker_read(&ticker), NTICKS - j,
15 "Unexpected ticker value (i=%d, j=%d)", i, j);
16 expect_false(ticker_tick(&ticker),
17 "Unexpected ticker fire (i=%d, j=%d)", i, j);
18 }
19 expect_u32_eq(ticker_read(&ticker), 0,
20 "Expected ticker depletion");
21 expect_true(ticker_tick(&ticker),
22 "Expected ticker fire (i=%d)", i);
23 expect_u32_eq(ticker_read(&ticker), NTICKS,
24 "Expected ticker reset");
25 }
26#undef NTICKS
27}
28TEST_END
29
30TEST_BEGIN(test_ticker_ticks) {
31#define NTICKS 3
32 ticker_t ticker;
33
34 ticker_init(&ticker, NTICKS);
35
36 expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
37 expect_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
38 expect_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
39 expect_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
40 expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
41
42 expect_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
43 expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
44#undef NTICKS
45}
46TEST_END
47
48TEST_BEGIN(test_ticker_copy) {
49#define NTICKS 3
50 ticker_t ta, tb;
51
52 ticker_init(&ta, NTICKS);
53 ticker_copy(&tb, &ta);
54 expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
55 expect_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
56 expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
57
58 ticker_tick(&ta);
59 ticker_copy(&tb, &ta);
60 expect_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
61 expect_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
62 expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
63#undef NTICKS
64}
65TEST_END
66
67TEST_BEGIN(test_ticker_geom) {
68 const int32_t ticks = 100;
69 const uint64_t niters = 100 * 1000;
70
71 ticker_geom_t ticker;
72 ticker_geom_init(&ticker, ticks);
73 uint64_t total_ticks = 0;
74 /* Just some random constant. */
75 uint64_t prng_state = 0x343219f93496db9fULL;
76 for (uint64_t i = 0; i < niters; i++) {
77 while(!ticker_geom_tick(&ticker, &prng_state)) {
78 total_ticks++;
79 }
80 }
81 /*
82 * In fact, with this choice of random seed and the PRNG implementation
83 * used at the time this was tested, total_ticks is 95.1% of the
84 * expected ticks.
85 */
86 expect_u64_ge(total_ticks , niters * ticks * 9 / 10,
87 "Mean off by > 10%%");
88 expect_u64_le(total_ticks , niters * ticks * 11 / 10,
89 "Mean off by > 10%%");
90}
91TEST_END
92
93int
94main(void) {
95 return test(
96 test_ticker_tick,
97 test_ticker_ticks,
98 test_ticker_copy,
99 test_ticker_geom);
100}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/tsd.c b/examples/redis-unstable/deps/jemalloc/test/unit/tsd.c
deleted file mode 100644
index 205d870..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/tsd.c
+++ /dev/null
@@ -1,274 +0,0 @@
1#include "test/jemalloc_test.h"
2
3/*
4 * If we're e.g. in debug mode, we *never* enter the fast path, and so shouldn't
5 * be asserting that we're on one.
6 */
7static bool originally_fast;
8static int data_cleanup_count;
9
10void
11data_cleanup(int *data) {
12 if (data_cleanup_count == 0) {
13 expect_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT,
14 "Argument passed into cleanup function should match tsd "
15 "value");
16 }
17 ++data_cleanup_count;
18
19 /*
20 * Allocate during cleanup for two rounds, in order to assure that
21 * jemalloc's internal tsd reinitialization happens.
22 */
23 bool reincarnate = false;
24 switch (*data) {
25 case MALLOC_TSD_TEST_DATA_INIT:
26 *data = 1;
27 reincarnate = true;
28 break;
29 case 1:
30 *data = 2;
31 reincarnate = true;
32 break;
33 case 2:
34 return;
35 default:
36 not_reached();
37 }
38
39 if (reincarnate) {
40 void *p = mallocx(1, 0);
41 expect_ptr_not_null(p, "Unexpeced mallocx() failure");
42 dallocx(p, 0);
43 }
44}
45
46static void *
47thd_start(void *arg) {
48 int d = (int)(uintptr_t)arg;
49 void *p;
50
51 /*
52 * Test free before tsd init -- the free fast path (which does not
53 * explicitly check for NULL) has to tolerate this case, and fall back
54 * to free_default.
55 */
56 free(NULL);
57
58 tsd_t *tsd = tsd_fetch();
59 expect_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT,
60 "Initial tsd get should return initialization value");
61
62 p = malloc(1);
63 expect_ptr_not_null(p, "Unexpected malloc() failure");
64
65 tsd_test_data_set(tsd, d);
66 expect_x_eq(tsd_test_data_get(tsd), d,
67 "After tsd set, tsd get should return value that was set");
68
69 d = 0;
70 expect_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg,
71 "Resetting local data should have no effect on tsd");
72
73 tsd_test_callback_set(tsd, &data_cleanup);
74
75 free(p);
76 return NULL;
77}
78
79TEST_BEGIN(test_tsd_main_thread) {
80 thd_start((void *)(uintptr_t)0xa5f3e329);
81}
82TEST_END
83
84TEST_BEGIN(test_tsd_sub_thread) {
85 thd_t thd;
86
87 data_cleanup_count = 0;
88 thd_create(&thd, thd_start, (void *)MALLOC_TSD_TEST_DATA_INIT);
89 thd_join(thd, NULL);
90 /*
91 * We reincarnate twice in the data cleanup, so it should execute at
92 * least 3 times.
93 */
94 expect_x_ge(data_cleanup_count, 3,
95 "Cleanup function should have executed multiple times.");
96}
97TEST_END
98
99static void *
100thd_start_reincarnated(void *arg) {
101 tsd_t *tsd = tsd_fetch();
102 assert(tsd);
103
104 void *p = malloc(1);
105 expect_ptr_not_null(p, "Unexpected malloc() failure");
106
107 /* Manually trigger reincarnation. */
108 expect_ptr_not_null(tsd_arena_get(tsd),
109 "Should have tsd arena set.");
110 tsd_cleanup((void *)tsd);
111 expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
112 "TSD arena should have been cleared.");
113 expect_u_eq(tsd_state_get(tsd), tsd_state_purgatory,
114 "TSD state should be purgatory\n");
115
116 free(p);
117 expect_u_eq(tsd_state_get(tsd), tsd_state_reincarnated,
118 "TSD state should be reincarnated\n");
119 p = mallocx(1, MALLOCX_TCACHE_NONE);
120 expect_ptr_not_null(p, "Unexpected malloc() failure");
121 expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
122 "Should not have tsd arena set after reincarnation.");
123
124 free(p);
125 tsd_cleanup((void *)tsd);
126 expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
127 "TSD arena should have been cleared after 2nd cleanup.");
128
129 return NULL;
130}
131
132TEST_BEGIN(test_tsd_reincarnation) {
133 thd_t thd;
134 thd_create(&thd, thd_start_reincarnated, NULL);
135 thd_join(thd, NULL);
136}
137TEST_END
138
139typedef struct {
140 atomic_u32_t phase;
141 atomic_b_t error;
142} global_slow_data_t;
143
144static void *
145thd_start_global_slow(void *arg) {
146 /* PHASE 0 */
147 global_slow_data_t *data = (global_slow_data_t *)arg;
148 free(mallocx(1, 0));
149
150 tsd_t *tsd = tsd_fetch();
151 /*
152 * No global slowness has happened yet; there was an error if we were
153 * originally fast but aren't now.
154 */
155 atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
156 ATOMIC_SEQ_CST);
157 atomic_store_u32(&data->phase, 1, ATOMIC_SEQ_CST);
158
159 /* PHASE 2 */
160 while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 2) {
161 }
162 free(mallocx(1, 0));
163 atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
164 atomic_store_u32(&data->phase, 3, ATOMIC_SEQ_CST);
165
166 /* PHASE 4 */
167 while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 4) {
168 }
169 free(mallocx(1, 0));
170 atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
171 atomic_store_u32(&data->phase, 5, ATOMIC_SEQ_CST);
172
173 /* PHASE 6 */
174 while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 6) {
175 }
176 free(mallocx(1, 0));
177 /* Only one decrement so far. */
178 atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
179 atomic_store_u32(&data->phase, 7, ATOMIC_SEQ_CST);
180
181 /* PHASE 8 */
182 while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 8) {
183 }
184 free(mallocx(1, 0));
185 /*
186 * Both decrements happened; we should be fast again (if we ever
187 * were)
188 */
189 atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
190 ATOMIC_SEQ_CST);
191 atomic_store_u32(&data->phase, 9, ATOMIC_SEQ_CST);
192
193 return NULL;
194}
195
196TEST_BEGIN(test_tsd_global_slow) {
197 global_slow_data_t data = {ATOMIC_INIT(0), ATOMIC_INIT(false)};
198 /*
199 * Note that the "mallocx" here (vs. malloc) is important, since the
200 * compiler is allowed to optimize away free(malloc(1)) but not
201 * free(mallocx(1)).
202 */
203 free(mallocx(1, 0));
204 tsd_t *tsd = tsd_fetch();
205 originally_fast = tsd_fast(tsd);
206
207 thd_t thd;
208 thd_create(&thd, thd_start_global_slow, (void *)&data.phase);
209 /* PHASE 1 */
210 while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 1) {
211 /*
212 * We don't have a portable condvar/semaphore mechanism.
213 * Spin-wait.
214 */
215 }
216 expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
217 tsd_global_slow_inc(tsd_tsdn(tsd));
218 free(mallocx(1, 0));
219 expect_false(tsd_fast(tsd), "");
220 atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST);
221
222 /* PHASE 3 */
223 while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) {
224 }
225 expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
226 /* Increase again, so that we can test multiple fast/slow changes. */
227 tsd_global_slow_inc(tsd_tsdn(tsd));
228 atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST);
229 free(mallocx(1, 0));
230 expect_false(tsd_fast(tsd), "");
231
232 /* PHASE 5 */
233 while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) {
234 }
235 expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
236 tsd_global_slow_dec(tsd_tsdn(tsd));
237 atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST);
238 /* We only decreased once; things should still be slow. */
239 free(mallocx(1, 0));
240 expect_false(tsd_fast(tsd), "");
241
242 /* PHASE 7 */
243 while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) {
244 }
245 expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
246 tsd_global_slow_dec(tsd_tsdn(tsd));
247 atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST);
248 /* We incremented and then decremented twice; we should be fast now. */
249 free(mallocx(1, 0));
250 expect_true(!originally_fast || tsd_fast(tsd), "");
251
252 /* PHASE 9 */
253 while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) {
254 }
255 expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
256
257 thd_join(thd, NULL);
258}
259TEST_END
260
261int
262main(void) {
263 /* Ensure tsd bootstrapped. */
264 if (nallocx(1, 0) == 0) {
265 malloc_printf("Initialization error");
266 return test_status_fail;
267 }
268
269 return test_no_reentrancy(
270 test_tsd_main_thread,
271 test_tsd_sub_thread,
272 test_tsd_reincarnation,
273 test_tsd_global_slow);
274}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/uaf.c b/examples/redis-unstable/deps/jemalloc/test/unit/uaf.c
deleted file mode 100644
index a8433c2..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/uaf.c
+++ /dev/null
@@ -1,262 +0,0 @@
1#include "test/jemalloc_test.h"
2#include "test/arena_util.h"
3#include "test/san.h"
4
5#include "jemalloc/internal/cache_bin.h"
6#include "jemalloc/internal/san.h"
7#include "jemalloc/internal/safety_check.h"
8
9const char *malloc_conf = TEST_SAN_UAF_ALIGN_ENABLE;
10
11static size_t san_uaf_align;
12
13static bool fake_abort_called;
14void fake_abort(const char *message) {
15 (void)message;
16 fake_abort_called = true;
17}
18
19static void
20test_write_after_free_pre(void) {
21 safety_check_set_abort(&fake_abort);
22 fake_abort_called = false;
23}
24
25static void
26test_write_after_free_post(void) {
27 assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
28 0, "Unexpected tcache flush failure");
29 expect_true(fake_abort_called, "Use-after-free check didn't fire.");
30 safety_check_set_abort(NULL);
31}
32
33static bool
34uaf_detection_enabled(void) {
35 if (!config_uaf_detection || !san_uaf_detection_enabled()) {
36 return false;
37 }
38
39 ssize_t lg_san_uaf_align;
40 size_t sz = sizeof(lg_san_uaf_align);
41 assert_d_eq(mallctl("opt.lg_san_uaf_align", &lg_san_uaf_align, &sz,
42 NULL, 0), 0, "Unexpected mallctl failure");
43 if (lg_san_uaf_align < 0) {
44 return false;
45 }
46 assert_zd_ge(lg_san_uaf_align, LG_PAGE, "san_uaf_align out of range");
47 san_uaf_align = (size_t)1 << lg_san_uaf_align;
48
49 bool tcache_enabled;
50 sz = sizeof(tcache_enabled);
51 assert_d_eq(mallctl("thread.tcache.enabled", &tcache_enabled, &sz, NULL,
52 0), 0, "Unexpected mallctl failure");
53 if (!tcache_enabled) {
54 return false;
55 }
56
57 return true;
58}
59
60static size_t
61read_tcache_stashed_bytes(unsigned arena_ind) {
62 if (!config_stats) {
63 return 0;
64 }
65
66 uint64_t epoch;
67 assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
68 0, "Unexpected mallctl() failure");
69
70 size_t tcache_stashed_bytes;
71 size_t sz = sizeof(tcache_stashed_bytes);
72 assert_d_eq(mallctl(
73 "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
74 ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
75 "Unexpected mallctl failure");
76
77 return tcache_stashed_bytes;
78}
79
80static void
81test_use_after_free(size_t alloc_size, bool write_after_free) {
82 void *ptr = (void *)(uintptr_t)san_uaf_align;
83 assert_true(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
84 ptr = (void *)((uintptr_t)123 * (uintptr_t)san_uaf_align);
85 assert_true(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
86 ptr = (void *)((uintptr_t)san_uaf_align + 1);
87 assert_false(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
88
89 /*
90 * Disable purging (-1) so that all dirty pages remain committed, to
91 * make use-after-free tolerable.
92 */
93 unsigned arena_ind = do_arena_create(-1, -1);
94 int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
95
96 size_t n_max = san_uaf_align * 2;
97 void **items = mallocx(n_max * sizeof(void *), flags);
98 assert_ptr_not_null(items, "Unexpected mallocx failure");
99
100 bool found = false;
101 size_t iter = 0;
102 char magic = 's';
103 assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
104 0, "Unexpected tcache flush failure");
105 while (!found) {
106 ptr = mallocx(alloc_size, flags);
107 assert_ptr_not_null(ptr, "Unexpected mallocx failure");
108
109 found = cache_bin_nonfast_aligned(ptr);
110 *(char *)ptr = magic;
111 items[iter] = ptr;
112 assert_zu_lt(iter++, n_max, "No aligned ptr found");
113 }
114
115 if (write_after_free) {
116 test_write_after_free_pre();
117 }
118 bool junked = false;
119 while (iter-- != 0) {
120 char *volatile mem = items[iter];
121 assert_c_eq(*mem, magic, "Unexpected memory content");
122 size_t stashed_before = read_tcache_stashed_bytes(arena_ind);
123 free(mem);
124 if (*mem != magic) {
125 junked = true;
126 assert_c_eq(*mem, (char)uaf_detect_junk,
127 "Unexpected junk-filling bytes");
128 if (write_after_free) {
129 *(char *)mem = magic + 1;
130 }
131
132 size_t stashed_after = read_tcache_stashed_bytes(
133 arena_ind);
134 /*
135 * An edge case is the deallocation above triggering the
136 * tcache GC event, in which case the stashed pointers
137 * may get flushed immediately, before returning from
138 * free(). Treat these cases as checked already.
139 */
140 if (stashed_after <= stashed_before) {
141 fake_abort_called = true;
142 }
143 }
144 /* Flush tcache (including stashed). */
145 assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
146 0, "Unexpected tcache flush failure");
147 }
148 expect_true(junked, "Aligned ptr not junked");
149 if (write_after_free) {
150 test_write_after_free_post();
151 }
152
153 dallocx(items, flags);
154 do_arena_destroy(arena_ind);
155}
156
157TEST_BEGIN(test_read_after_free) {
158 test_skip_if(!uaf_detection_enabled());
159
160 test_use_after_free(sizeof(void *), /* write_after_free */ false);
161 test_use_after_free(sizeof(void *) + 1, /* write_after_free */ false);
162 test_use_after_free(16, /* write_after_free */ false);
163 test_use_after_free(20, /* write_after_free */ false);
164 test_use_after_free(32, /* write_after_free */ false);
165 test_use_after_free(33, /* write_after_free */ false);
166 test_use_after_free(48, /* write_after_free */ false);
167 test_use_after_free(64, /* write_after_free */ false);
168 test_use_after_free(65, /* write_after_free */ false);
169 test_use_after_free(129, /* write_after_free */ false);
170 test_use_after_free(255, /* write_after_free */ false);
171 test_use_after_free(256, /* write_after_free */ false);
172}
173TEST_END
174
175TEST_BEGIN(test_write_after_free) {
176 test_skip_if(!uaf_detection_enabled());
177
178 test_use_after_free(sizeof(void *), /* write_after_free */ true);
179 test_use_after_free(sizeof(void *) + 1, /* write_after_free */ true);
180 test_use_after_free(16, /* write_after_free */ true);
181 test_use_after_free(20, /* write_after_free */ true);
182 test_use_after_free(32, /* write_after_free */ true);
183 test_use_after_free(33, /* write_after_free */ true);
184 test_use_after_free(48, /* write_after_free */ true);
185 test_use_after_free(64, /* write_after_free */ true);
186 test_use_after_free(65, /* write_after_free */ true);
187 test_use_after_free(129, /* write_after_free */ true);
188 test_use_after_free(255, /* write_after_free */ true);
189 test_use_after_free(256, /* write_after_free */ true);
190}
191TEST_END
192
193static bool
194check_allocated_intact(void **allocated, size_t n_alloc) {
195 for (unsigned i = 0; i < n_alloc; i++) {
196 void *ptr = *(void **)allocated[i];
197 bool found = false;
198 for (unsigned j = 0; j < n_alloc; j++) {
199 if (ptr == allocated[j]) {
200 found = true;
201 break;
202 }
203 }
204 if (!found) {
205 return false;
206 }
207 }
208
209 return true;
210}
211
212TEST_BEGIN(test_use_after_free_integration) {
213 test_skip_if(!uaf_detection_enabled());
214
215 unsigned arena_ind = do_arena_create(-1, -1);
216 int flags = MALLOCX_ARENA(arena_ind);
217
218 size_t n_alloc = san_uaf_align * 2;
219 void **allocated = mallocx(n_alloc * sizeof(void *), flags);
220 assert_ptr_not_null(allocated, "Unexpected mallocx failure");
221
222 for (unsigned i = 0; i < n_alloc; i++) {
223 allocated[i] = mallocx(sizeof(void *) * 8, flags);
224 assert_ptr_not_null(allocated[i], "Unexpected mallocx failure");
225 if (i > 0) {
226 /* Emulate a circular list. */
227 *(void **)allocated[i] = allocated[i - 1];
228 }
229 }
230 *(void **)allocated[0] = allocated[n_alloc - 1];
231 expect_true(check_allocated_intact(allocated, n_alloc),
232 "Allocated data corrupted");
233
234 for (unsigned i = 0; i < n_alloc; i++) {
235 free(allocated[i]);
236 }
237 /* Read-after-free */
238 expect_false(check_allocated_intact(allocated, n_alloc),
239 "Junk-filling not detected");
240
241 test_write_after_free_pre();
242 for (unsigned i = 0; i < n_alloc; i++) {
243 allocated[i] = mallocx(sizeof(void *), flags);
244 assert_ptr_not_null(allocated[i], "Unexpected mallocx failure");
245 *(void **)allocated[i] = (void *)(uintptr_t)i;
246 }
247 /* Write-after-free */
248 for (unsigned i = 0; i < n_alloc; i++) {
249 free(allocated[i]);
250 *(void **)allocated[i] = NULL;
251 }
252 test_write_after_free_post();
253}
254TEST_END
255
256int
257main(void) {
258 return test(
259 test_read_after_free,
260 test_write_after_free,
261 test_use_after_free_integration);
262}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/witness.c b/examples/redis-unstable/deps/jemalloc/test/unit/witness.c
deleted file mode 100644
index 5a6c448..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/witness.c
+++ /dev/null
@@ -1,280 +0,0 @@
1#include "test/jemalloc_test.h"
2
3static witness_lock_error_t *witness_lock_error_orig;
4static witness_owner_error_t *witness_owner_error_orig;
5static witness_not_owner_error_t *witness_not_owner_error_orig;
6static witness_depth_error_t *witness_depth_error_orig;
7
8static bool saw_lock_error;
9static bool saw_owner_error;
10static bool saw_not_owner_error;
11static bool saw_depth_error;
12
13static void
14witness_lock_error_intercept(const witness_list_t *witnesses,
15 const witness_t *witness) {
16 saw_lock_error = true;
17}
18
19static void
20witness_owner_error_intercept(const witness_t *witness) {
21 saw_owner_error = true;
22}
23
24static void
25witness_not_owner_error_intercept(const witness_t *witness) {
26 saw_not_owner_error = true;
27}
28
29static void
30witness_depth_error_intercept(const witness_list_t *witnesses,
31 witness_rank_t rank_inclusive, unsigned depth) {
32 saw_depth_error = true;
33}
34
35static int
36witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) {
37 expect_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
38
39 assert(oa == (void *)a);
40 assert(ob == (void *)b);
41
42 return strcmp(a->name, b->name);
43}
44
45static int
46witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b,
47 void *ob) {
48 expect_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
49
50 assert(oa == (void *)a);
51 assert(ob == (void *)b);
52
53 return -strcmp(a->name, b->name);
54}
55
56TEST_BEGIN(test_witness) {
57 witness_t a, b;
58 witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
59
60 test_skip_if(!config_debug);
61
62 witness_assert_lockless(&witness_tsdn);
63 witness_assert_depth(&witness_tsdn, 0);
64 witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0);
65
66 witness_init(&a, "a", 1, NULL, NULL);
67 witness_assert_not_owner(&witness_tsdn, &a);
68 witness_lock(&witness_tsdn, &a);
69 witness_assert_owner(&witness_tsdn, &a);
70 witness_assert_depth(&witness_tsdn, 1);
71 witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1);
72 witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 0);
73
74 witness_init(&b, "b", 2, NULL, NULL);
75 witness_assert_not_owner(&witness_tsdn, &b);
76 witness_lock(&witness_tsdn, &b);
77 witness_assert_owner(&witness_tsdn, &b);
78 witness_assert_depth(&witness_tsdn, 2);
79 witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 2);
80 witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1);
81 witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0);
82
83 witness_unlock(&witness_tsdn, &a);
84 witness_assert_depth(&witness_tsdn, 1);
85 witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1);
86 witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1);
87 witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0);
88 witness_unlock(&witness_tsdn, &b);
89
90 witness_assert_lockless(&witness_tsdn);
91 witness_assert_depth(&witness_tsdn, 0);
92 witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0);
93}
94TEST_END
95
96TEST_BEGIN(test_witness_comp) {
97 witness_t a, b, c, d;
98 witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
99
100 test_skip_if(!config_debug);
101
102 witness_assert_lockless(&witness_tsdn);
103
104 witness_init(&a, "a", 1, witness_comp, &a);
105 witness_assert_not_owner(&witness_tsdn, &a);
106 witness_lock(&witness_tsdn, &a);
107 witness_assert_owner(&witness_tsdn, &a);
108 witness_assert_depth(&witness_tsdn, 1);
109
110 witness_init(&b, "b", 1, witness_comp, &b);
111 witness_assert_not_owner(&witness_tsdn, &b);
112 witness_lock(&witness_tsdn, &b);
113 witness_assert_owner(&witness_tsdn, &b);
114 witness_assert_depth(&witness_tsdn, 2);
115 witness_unlock(&witness_tsdn, &b);
116 witness_assert_depth(&witness_tsdn, 1);
117
118 witness_lock_error_orig = witness_lock_error;
119 witness_lock_error = witness_lock_error_intercept;
120 saw_lock_error = false;
121
122 witness_init(&c, "c", 1, witness_comp_reverse, &c);
123 witness_assert_not_owner(&witness_tsdn, &c);
124 expect_false(saw_lock_error, "Unexpected witness lock error");
125 witness_lock(&witness_tsdn, &c);
126 expect_true(saw_lock_error, "Expected witness lock error");
127 witness_unlock(&witness_tsdn, &c);
128 witness_assert_depth(&witness_tsdn, 1);
129
130 saw_lock_error = false;
131
132 witness_init(&d, "d", 1, NULL, NULL);
133 witness_assert_not_owner(&witness_tsdn, &d);
134 expect_false(saw_lock_error, "Unexpected witness lock error");
135 witness_lock(&witness_tsdn, &d);
136 expect_true(saw_lock_error, "Expected witness lock error");
137 witness_unlock(&witness_tsdn, &d);
138 witness_assert_depth(&witness_tsdn, 1);
139
140 witness_unlock(&witness_tsdn, &a);
141
142 witness_assert_lockless(&witness_tsdn);
143
144 witness_lock_error = witness_lock_error_orig;
145}
146TEST_END
147
148TEST_BEGIN(test_witness_reversal) {
149 witness_t a, b;
150 witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
151
152 test_skip_if(!config_debug);
153
154 witness_lock_error_orig = witness_lock_error;
155 witness_lock_error = witness_lock_error_intercept;
156 saw_lock_error = false;
157
158 witness_assert_lockless(&witness_tsdn);
159
160 witness_init(&a, "a", 1, NULL, NULL);
161 witness_init(&b, "b", 2, NULL, NULL);
162
163 witness_lock(&witness_tsdn, &b);
164 witness_assert_depth(&witness_tsdn, 1);
165 expect_false(saw_lock_error, "Unexpected witness lock error");
166 witness_lock(&witness_tsdn, &a);
167 expect_true(saw_lock_error, "Expected witness lock error");
168
169 witness_unlock(&witness_tsdn, &a);
170 witness_assert_depth(&witness_tsdn, 1);
171 witness_unlock(&witness_tsdn, &b);
172
173 witness_assert_lockless(&witness_tsdn);
174
175 witness_lock_error = witness_lock_error_orig;
176}
177TEST_END
178
179TEST_BEGIN(test_witness_recursive) {
180 witness_t a;
181 witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
182
183 test_skip_if(!config_debug);
184
185 witness_not_owner_error_orig = witness_not_owner_error;
186 witness_not_owner_error = witness_not_owner_error_intercept;
187 saw_not_owner_error = false;
188
189 witness_lock_error_orig = witness_lock_error;
190 witness_lock_error = witness_lock_error_intercept;
191 saw_lock_error = false;
192
193 witness_assert_lockless(&witness_tsdn);
194
195 witness_init(&a, "a", 1, NULL, NULL);
196
197 witness_lock(&witness_tsdn, &a);
198 expect_false(saw_lock_error, "Unexpected witness lock error");
199 expect_false(saw_not_owner_error, "Unexpected witness not owner error");
200 witness_lock(&witness_tsdn, &a);
201 expect_true(saw_lock_error, "Expected witness lock error");
202 expect_true(saw_not_owner_error, "Expected witness not owner error");
203
204 witness_unlock(&witness_tsdn, &a);
205
206 witness_assert_lockless(&witness_tsdn);
207
208 witness_owner_error = witness_owner_error_orig;
209 witness_lock_error = witness_lock_error_orig;
210
211}
212TEST_END
213
214TEST_BEGIN(test_witness_unlock_not_owned) {
215 witness_t a;
216 witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
217
218 test_skip_if(!config_debug);
219
220 witness_owner_error_orig = witness_owner_error;
221 witness_owner_error = witness_owner_error_intercept;
222 saw_owner_error = false;
223
224 witness_assert_lockless(&witness_tsdn);
225
226 witness_init(&a, "a", 1, NULL, NULL);
227
228 expect_false(saw_owner_error, "Unexpected owner error");
229 witness_unlock(&witness_tsdn, &a);
230 expect_true(saw_owner_error, "Expected owner error");
231
232 witness_assert_lockless(&witness_tsdn);
233
234 witness_owner_error = witness_owner_error_orig;
235}
236TEST_END
237
238TEST_BEGIN(test_witness_depth) {
239 witness_t a;
240 witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
241
242 test_skip_if(!config_debug);
243
244 witness_depth_error_orig = witness_depth_error;
245 witness_depth_error = witness_depth_error_intercept;
246 saw_depth_error = false;
247
248 witness_assert_lockless(&witness_tsdn);
249 witness_assert_depth(&witness_tsdn, 0);
250
251 witness_init(&a, "a", 1, NULL, NULL);
252
253 expect_false(saw_depth_error, "Unexpected depth error");
254 witness_assert_lockless(&witness_tsdn);
255 witness_assert_depth(&witness_tsdn, 0);
256
257 witness_lock(&witness_tsdn, &a);
258 witness_assert_lockless(&witness_tsdn);
259 witness_assert_depth(&witness_tsdn, 0);
260 expect_true(saw_depth_error, "Expected depth error");
261
262 witness_unlock(&witness_tsdn, &a);
263
264 witness_assert_lockless(&witness_tsdn);
265 witness_assert_depth(&witness_tsdn, 0);
266
267 witness_depth_error = witness_depth_error_orig;
268}
269TEST_END
270
271int
272main(void) {
273 return test(
274 test_witness,
275 test_witness_comp,
276 test_witness_reversal,
277 test_witness_recursive,
278 test_witness_unlock_not_owned,
279 test_witness_depth);
280}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/zero.c b/examples/redis-unstable/deps/jemalloc/test/unit/zero.c
deleted file mode 100644
index d3e81f1..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/zero.c
+++ /dev/null
@@ -1,59 +0,0 @@
1#include "test/jemalloc_test.h"
2
3static void
4test_zero(size_t sz_min, size_t sz_max) {
5 uint8_t *s;
6 size_t sz_prev, sz, i;
7#define MAGIC ((uint8_t)0x61)
8
9 sz_prev = 0;
10 s = (uint8_t *)mallocx(sz_min, 0);
11 expect_ptr_not_null((void *)s, "Unexpected mallocx() failure");
12
13 for (sz = sallocx(s, 0); sz <= sz_max;
14 sz_prev = sz, sz = sallocx(s, 0)) {
15 if (sz_prev > 0) {
16 expect_u_eq(s[0], MAGIC,
17 "Previously allocated byte %zu/%zu is corrupted",
18 ZU(0), sz_prev);
19 expect_u_eq(s[sz_prev-1], MAGIC,
20 "Previously allocated byte %zu/%zu is corrupted",
21 sz_prev-1, sz_prev);
22 }
23
24 for (i = sz_prev; i < sz; i++) {
25 expect_u_eq(s[i], 0x0,
26 "Newly allocated byte %zu/%zu isn't zero-filled",
27 i, sz);
28 s[i] = MAGIC;
29 }
30
31 if (xallocx(s, sz+1, 0, 0) == sz) {
32 s = (uint8_t *)rallocx(s, sz+1, 0);
33 expect_ptr_not_null((void *)s,
34 "Unexpected rallocx() failure");
35 }
36 }
37
38 dallocx(s, 0);
39#undef MAGIC
40}
41
42TEST_BEGIN(test_zero_small) {
43 test_skip_if(!config_fill);
44 test_zero(1, SC_SMALL_MAXCLASS - 1);
45}
46TEST_END
47
48TEST_BEGIN(test_zero_large) {
49 test_skip_if(!config_fill);
50 test_zero(SC_SMALL_MAXCLASS + 1, 1U << (SC_LG_LARGE_MINCLASS + 1));
51}
52TEST_END
53
54int
55main(void) {
56 return test(
57 test_zero_small,
58 test_zero_large);
59}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/zero.sh b/examples/redis-unstable/deps/jemalloc/test/unit/zero.sh
deleted file mode 100644
index b4540b2..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/zero.sh
+++ /dev/null
@@ -1,5 +0,0 @@
1#!/bin/sh
2
3if [ "x${enable_fill}" = "x1" ] ; then
4 export MALLOC_CONF="abort:false,junk:false,zero:true"
5fi
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_abort.c b/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_abort.c
deleted file mode 100644
index a880d10..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_abort.c
+++ /dev/null
@@ -1,26 +0,0 @@
1#include "test/jemalloc_test.h"
2
3#include <signal.h>
4
5static bool abort_called = false;
6
7void set_abort_called() {
8 abort_called = true;
9};
10
11TEST_BEGIN(test_realloc_abort) {
12 abort_called = false;
13 safety_check_set_abort(&set_abort_called);
14 void *ptr = mallocx(42, 0);
15 expect_ptr_not_null(ptr, "Unexpected mallocx error");
16 ptr = realloc(ptr, 0);
17 expect_true(abort_called, "Realloc with zero size didn't abort");
18}
19TEST_END
20
21int
22main(void) {
23 return test(
24 test_realloc_abort);
25}
26
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_abort.sh b/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_abort.sh
deleted file mode 100644
index 37daeea..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_abort.sh
+++ /dev/null
@@ -1,3 +0,0 @@
1#!/bin/sh
2
3export MALLOC_CONF="zero_realloc:abort"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_alloc.c b/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_alloc.c
deleted file mode 100644
index 65e07bd..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_alloc.c
+++ /dev/null
@@ -1,48 +0,0 @@
1#include "test/jemalloc_test.h"
2
3static uint64_t
4allocated() {
5 if (!config_stats) {
6 return 0;
7 }
8 uint64_t allocated;
9 size_t sz = sizeof(allocated);
10 expect_d_eq(mallctl("thread.allocated", (void *)&allocated, &sz, NULL,
11 0), 0, "Unexpected mallctl failure");
12 return allocated;
13}
14
15static uint64_t
16deallocated() {
17 if (!config_stats) {
18 return 0;
19 }
20 uint64_t deallocated;
21 size_t sz = sizeof(deallocated);
22 expect_d_eq(mallctl("thread.deallocated", (void *)&deallocated, &sz,
23 NULL, 0), 0, "Unexpected mallctl failure");
24 return deallocated;
25}
26
27TEST_BEGIN(test_realloc_alloc) {
28 void *ptr = mallocx(1, 0);
29 expect_ptr_not_null(ptr, "Unexpected mallocx error");
30 uint64_t allocated_before = allocated();
31 uint64_t deallocated_before = deallocated();
32 ptr = realloc(ptr, 0);
33 uint64_t allocated_after = allocated();
34 uint64_t deallocated_after = deallocated();
35 if (config_stats) {
36 expect_u64_lt(allocated_before, allocated_after,
37 "Unexpected stats change");
38 expect_u64_lt(deallocated_before, deallocated_after,
39 "Unexpected stats change");
40 }
41 dallocx(ptr, 0);
42}
43TEST_END
44int
45main(void) {
46 return test(
47 test_realloc_alloc);
48}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_alloc.sh b/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_alloc.sh
deleted file mode 100644
index 802687c..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_alloc.sh
+++ /dev/null
@@ -1,3 +0,0 @@
1#!/bin/sh
2
3export MALLOC_CONF="zero_realloc:alloc"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_free.c b/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_free.c
deleted file mode 100644
index baed86c..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_free.c
+++ /dev/null
@@ -1,33 +0,0 @@
1#include "test/jemalloc_test.h"
2
3static uint64_t
4deallocated() {
5 if (!config_stats) {
6 return 0;
7 }
8 uint64_t deallocated;
9 size_t sz = sizeof(deallocated);
10 expect_d_eq(mallctl("thread.deallocated", (void *)&deallocated, &sz,
11 NULL, 0), 0, "Unexpected mallctl failure");
12 return deallocated;
13}
14
15TEST_BEGIN(test_realloc_free) {
16 void *ptr = mallocx(42, 0);
17 expect_ptr_not_null(ptr, "Unexpected mallocx error");
18 uint64_t deallocated_before = deallocated();
19 ptr = realloc(ptr, 0);
20 uint64_t deallocated_after = deallocated();
21 expect_ptr_null(ptr, "Realloc didn't free");
22 if (config_stats) {
23 expect_u64_gt(deallocated_after, deallocated_before,
24 "Realloc didn't free");
25 }
26}
27TEST_END
28
29int
30main(void) {
31 return test(
32 test_realloc_free);
33}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_free.sh b/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_free.sh
deleted file mode 100644
index 51b01c9..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/zero_realloc_free.sh
+++ /dev/null
@@ -1,3 +0,0 @@
1#!/bin/sh
2
3export MALLOC_CONF="zero_realloc:free"
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/zero_reallocs.c b/examples/redis-unstable/deps/jemalloc/test/unit/zero_reallocs.c
deleted file mode 100644
index 66c7a40..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/zero_reallocs.c
+++ /dev/null
@@ -1,40 +0,0 @@
1#include "test/jemalloc_test.h"
2
3static size_t
4zero_reallocs() {
5 if (!config_stats) {
6 return 0;
7 }
8 size_t count = 12345;
9 size_t sz = sizeof(count);
10
11 expect_d_eq(mallctl("stats.zero_reallocs", (void *)&count, &sz,
12 NULL, 0), 0, "Unexpected mallctl failure");
13 return count;
14}
15
16TEST_BEGIN(test_zero_reallocs) {
17 test_skip_if(!config_stats);
18
19 for (size_t i = 0; i < 100; ++i) {
20 void *ptr = mallocx(i * i + 1, 0);
21 expect_ptr_not_null(ptr, "Unexpected mallocx error");
22 size_t count = zero_reallocs();
23 expect_zu_eq(i, count, "Incorrect zero realloc count");
24 ptr = realloc(ptr, 0);
25 expect_ptr_null(ptr, "Realloc didn't free");
26 count = zero_reallocs();
27 expect_zu_eq(i + 1, count, "Realloc didn't adjust count");
28 }
29}
30TEST_END
31
32int
33main(void) {
34 /*
35 * We expect explicit counts; reentrant tests run multiple times, so
36 * counts leak across runs.
37 */
38 return test_no_reentrancy(
39 test_zero_reallocs);
40}
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/zero_reallocs.sh b/examples/redis-unstable/deps/jemalloc/test/unit/zero_reallocs.sh
deleted file mode 100644
index 51b01c9..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/unit/zero_reallocs.sh
+++ /dev/null
@@ -1,3 +0,0 @@
1#!/bin/sh
2
3export MALLOC_CONF="zero_realloc:free"