aboutsummaryrefslogtreecommitdiff
path: root/examples/redis-unstable/deps/jemalloc/test/integration/smallocx.c
diff options
context:
space:
mode:
Diffstat (limited to 'examples/redis-unstable/deps/jemalloc/test/integration/smallocx.c')
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/integration/smallocx.c312
1 file changed, 0 insertions, 312 deletions
diff --git a/examples/redis-unstable/deps/jemalloc/test/integration/smallocx.c b/examples/redis-unstable/deps/jemalloc/test/integration/smallocx.c
deleted file mode 100644
index 389319b..0000000
--- a/examples/redis-unstable/deps/jemalloc/test/integration/smallocx.c
+++ /dev/null
@@ -1,312 +0,0 @@
1#include "test/jemalloc_test.h"
2#include "jemalloc/jemalloc_macros.h"
3
4#define STR_HELPER(x) #x
5#define STR(x) STR_HELPER(x)
6
7#ifndef JEMALLOC_VERSION_GID_IDENT
8 #error "JEMALLOC_VERSION_GID_IDENT not defined"
9#endif
10
11#define JOIN(x, y) x ## y
12#define JOIN2(x, y) JOIN(x, y)
13#define smallocx JOIN2(smallocx_, JEMALLOC_VERSION_GID_IDENT)
14
15typedef struct {
16 void *ptr;
17 size_t size;
18} smallocx_return_t;
19
20extern smallocx_return_t
21smallocx(size_t size, int flags);
22
23static unsigned
24get_nsizes_impl(const char *cmd) {
25 unsigned ret;
26 size_t z;
27
28 z = sizeof(unsigned);
29 expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
30 "Unexpected mallctl(\"%s\", ...) failure", cmd);
31
32 return ret;
33}
34
/* Number of large extent size classes exposed by the allocator. */
static unsigned
get_nlarge(void) {
	return get_nsizes_impl("arenas.nlextents");
}
39
40static size_t
41get_size_impl(const char *cmd, size_t ind) {
42 size_t ret;
43 size_t z;
44 size_t mib[4];
45 size_t miblen = 4;
46
47 z = sizeof(size_t);
48 expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
49 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
50 mib[2] = ind;
51 z = sizeof(size_t);
52 expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
53 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
54
55 return ret;
56}
57
58static size_t
59get_large_size(size_t ind) {
60 return get_size_impl("arenas.lextent.0.size", ind);
61}
62
63/*
64 * On systems which can't merge extents, tests that call this function generate
65 * a lot of dirty memory very quickly. Purging between cycles mitigates
66 * potential OOM on e.g. 32-bit Windows.
67 */
68static void
69purge(void) {
70 expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
71 "Unexpected mallctl error");
72}
73
74/*
75 * GCC "-Walloc-size-larger-than" warning detects when one of the memory
76 * allocation functions is called with a size larger than the maximum size that
77 * they support. Here we want to explicitly test that the allocation functions
78 * do indeed fail properly when this is the case, which triggers the warning.
79 * Therefore we disable the warning for these tests.
80 */
81JEMALLOC_DIAGNOSTIC_PUSH
82JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
83
84TEST_BEGIN(test_overflow) {
85 size_t largemax;
86
87 largemax = get_large_size(get_nlarge()-1);
88
89 expect_ptr_null(smallocx(largemax+1, 0).ptr,
90 "Expected OOM for smallocx(size=%#zx, 0)", largemax+1);
91
92 expect_ptr_null(smallocx(ZU(PTRDIFF_MAX)+1, 0).ptr,
93 "Expected OOM for smallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
94
95 expect_ptr_null(smallocx(SIZE_T_MAX, 0).ptr,
96 "Expected OOM for smallocx(size=%#zx, 0)", SIZE_T_MAX);
97
98 expect_ptr_null(smallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)).ptr,
99 "Expected OOM for smallocx(size=1, MALLOCX_ALIGN(%#zx))",
100 ZU(PTRDIFF_MAX)+1);
101}
102TEST_END
103
104static void *
105remote_alloc(void *arg) {
106 unsigned arena;
107 size_t sz = sizeof(unsigned);
108 expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
109 "Unexpected mallctl() failure");
110 size_t large_sz;
111 sz = sizeof(size_t);
112 expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
113 NULL, 0), 0, "Unexpected mallctl failure");
114
115 smallocx_return_t r
116 = smallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
117 void *ptr = r.ptr;
118 expect_zu_eq(r.size,
119 nallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE),
120 "Expected smalloc(size,flags).size == nallocx(size,flags)");
121 void **ret = (void **)arg;
122 *ret = ptr;
123
124 return NULL;
125}
126
127TEST_BEGIN(test_remote_free) {
128 thd_t thd;
129 void *ret;
130 thd_create(&thd, remote_alloc, (void *)&ret);
131 thd_join(thd, NULL);
132 expect_ptr_not_null(ret, "Unexpected smallocx failure");
133
134 /* Avoid TCACHE_NONE to explicitly test tcache_flush(). */
135 dallocx(ret, 0);
136 mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
137}
138TEST_END
139
140TEST_BEGIN(test_oom) {
141 size_t largemax;
142 bool oom;
143 void *ptrs[3];
144 unsigned i;
145
146 /*
147 * It should be impossible to allocate three objects that each consume
148 * nearly half the virtual address space.
149 */
150 largemax = get_large_size(get_nlarge()-1);
151 oom = false;
152 for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
153 ptrs[i] = smallocx(largemax, 0).ptr;
154 if (ptrs[i] == NULL) {
155 oom = true;
156 }
157 }
158 expect_true(oom,
159 "Expected OOM during series of calls to smallocx(size=%zu, 0)",
160 largemax);
161 for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
162 if (ptrs[i] != NULL) {
163 dallocx(ptrs[i], 0);
164 }
165 }
166 purge();
167
168#if LG_SIZEOF_PTR == 3
169 expect_ptr_null(smallocx(0x8000000000000000ULL,
170 MALLOCX_ALIGN(0x8000000000000000ULL)).ptr,
171 "Expected OOM for smallocx()");
172 expect_ptr_null(smallocx(0x8000000000000000ULL,
173 MALLOCX_ALIGN(0x80000000)).ptr,
174 "Expected OOM for smallocx()");
175#else
176 expect_ptr_null(smallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)).ptr,
177 "Expected OOM for smallocx()");
178#endif
179}
180TEST_END
181
182/* Re-enable the "-Walloc-size-larger-than=" warning */
183JEMALLOC_DIAGNOSTIC_POP
184
185TEST_BEGIN(test_basic) {
186#define MAXSZ (((size_t)1) << 23)
187 size_t sz;
188
189 for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
190 smallocx_return_t ret;
191 size_t nsz, rsz, smz;
192 void *p;
193 nsz = nallocx(sz, 0);
194 expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
195 ret = smallocx(sz, 0);
196 p = ret.ptr;
197 smz = ret.size;
198 expect_ptr_not_null(p,
199 "Unexpected smallocx(size=%zx, flags=0) error", sz);
200 rsz = sallocx(p, 0);
201 expect_zu_ge(rsz, sz, "Real size smaller than expected");
202 expect_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
203 expect_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
204 dallocx(p, 0);
205
206 ret = smallocx(sz, 0);
207 p = ret.ptr;
208 smz = ret.size;
209 expect_ptr_not_null(p,
210 "Unexpected smallocx(size=%zx, flags=0) error", sz);
211 dallocx(p, 0);
212
213 nsz = nallocx(sz, MALLOCX_ZERO);
214 expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
215 expect_zu_ne(smz, 0, "Unexpected smallocx() error");
216 ret = smallocx(sz, MALLOCX_ZERO);
217 p = ret.ptr;
218 expect_ptr_not_null(p,
219 "Unexpected smallocx(size=%zx, flags=MALLOCX_ZERO) error",
220 nsz);
221 rsz = sallocx(p, 0);
222 expect_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
223 expect_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
224 dallocx(p, 0);
225 purge();
226 }
227#undef MAXSZ
228}
229TEST_END
230
231TEST_BEGIN(test_alignment_and_size) {
232 const char *percpu_arena;
233 size_t sz = sizeof(percpu_arena);
234
235 if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) ||
236 strcmp(percpu_arena, "disabled") != 0) {
237 test_skip("test_alignment_and_size skipped: "
238 "not working with percpu arena.");
239 };
240#define MAXALIGN (((size_t)1) << 23)
241#define NITER 4
242 size_t nsz, rsz, smz, alignment, total;
243 unsigned i;
244 void *ps[NITER];
245
246 for (i = 0; i < NITER; i++) {
247 ps[i] = NULL;
248 }
249
250 for (alignment = 8;
251 alignment <= MAXALIGN;
252 alignment <<= 1) {
253 total = 0;
254 for (sz = 1;
255 sz < 3 * alignment && sz < (1U << 31);
256 sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
257 for (i = 0; i < NITER; i++) {
258 nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
259 MALLOCX_ZERO);
260 expect_zu_ne(nsz, 0,
261 "nallocx() error for alignment=%zu, "
262 "size=%zu (%#zx)", alignment, sz, sz);
263 smallocx_return_t ret
264 = smallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO);
265 ps[i] = ret.ptr;
266 expect_ptr_not_null(ps[i],
267 "smallocx() error for alignment=%zu, "
268 "size=%zu (%#zx)", alignment, sz, sz);
269 rsz = sallocx(ps[i], 0);
270 smz = ret.size;
271 expect_zu_ge(rsz, sz,
272 "Real size smaller than expected for "
273 "alignment=%zu, size=%zu", alignment, sz);
274 expect_zu_eq(nsz, rsz,
275 "nallocx()/sallocx() size mismatch for "
276 "alignment=%zu, size=%zu", alignment, sz);
277 expect_zu_eq(nsz, smz,
278 "nallocx()/smallocx() size mismatch for "
279 "alignment=%zu, size=%zu", alignment, sz);
280 expect_ptr_null(
281 (void *)((uintptr_t)ps[i] & (alignment-1)),
282 "%p inadequately aligned for"
283 " alignment=%zu, size=%zu", ps[i],
284 alignment, sz);
285 total += rsz;
286 if (total >= (MAXALIGN << 1)) {
287 break;
288 }
289 }
290 for (i = 0; i < NITER; i++) {
291 if (ps[i] != NULL) {
292 dallocx(ps[i], 0);
293 ps[i] = NULL;
294 }
295 }
296 }
297 purge();
298 }
299#undef MAXALIGN
300#undef NITER
301}
302TEST_END
303
304int
305main(void) {
306 return test(
307 test_overflow,
308 test_oom,
309 test_remote_free,
310 test_basic,
311 test_alignment_and_size);
312}