aboutsummaryrefslogtreecommitdiff
path: root/examples/redis-unstable/deps/jemalloc/test/unit/edata_cache.c
diff options
context:
space:
mode:
Diffstat (limited to 'examples/redis-unstable/deps/jemalloc/test/unit/edata_cache.c')
-rw-r--r--examples/redis-unstable/deps/jemalloc/test/unit/edata_cache.c226
1 file changed, 226 insertions(+), 0 deletions(-)
diff --git a/examples/redis-unstable/deps/jemalloc/test/unit/edata_cache.c b/examples/redis-unstable/deps/jemalloc/test/unit/edata_cache.c
new file mode 100644
index 0000000..af1110a
--- /dev/null
+++ b/examples/redis-unstable/deps/jemalloc/test/unit/edata_cache.c
@@ -0,0 +1,226 @@
1#include "test/jemalloc_test.h"
2
3#include "jemalloc/internal/edata_cache.h"
4
5static void
6test_edata_cache_init(edata_cache_t *edata_cache) {
7 base_t *base = base_new(TSDN_NULL, /* ind */ 1,
8 &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
9 assert_ptr_not_null(base, "");
10 bool err = edata_cache_init(edata_cache, base);
11 assert_false(err, "");
12}
13
14static void
15test_edata_cache_destroy(edata_cache_t *edata_cache) {
16 base_delete(TSDN_NULL, edata_cache->base);
17}
18
19TEST_BEGIN(test_edata_cache) {
20 edata_cache_t ec;
21 test_edata_cache_init(&ec);
22
23 /* Get one */
24 edata_t *ed1 = edata_cache_get(TSDN_NULL, &ec);
25 assert_ptr_not_null(ed1, "");
26
27 /* Cache should be empty */
28 assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
29
30 /* Get another */
31 edata_t *ed2 = edata_cache_get(TSDN_NULL, &ec);
32 assert_ptr_not_null(ed2, "");
33
34 /* Still empty */
35 assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
36
37 /* Put one back, and the cache should now have one item */
38 edata_cache_put(TSDN_NULL, &ec, ed1);
39 assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 1, "");
40
41 /* Reallocating should reuse the item, and leave an empty cache. */
42 edata_t *ed1_again = edata_cache_get(TSDN_NULL, &ec);
43 assert_ptr_eq(ed1, ed1_again, "");
44 assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
45
46 test_edata_cache_destroy(&ec);
47}
48TEST_END
49
50static size_t
51ecf_count(edata_cache_fast_t *ecf) {
52 size_t count = 0;
53 edata_t *cur;
54 ql_foreach(cur, &ecf->list.head, ql_link_inactive) {
55 count++;
56 }
57 return count;
58}
59
60TEST_BEGIN(test_edata_cache_fast_simple) {
61 edata_cache_t ec;
62 edata_cache_fast_t ecf;
63
64 test_edata_cache_init(&ec);
65 edata_cache_fast_init(&ecf, &ec);
66
67 edata_t *ed1 = edata_cache_fast_get(TSDN_NULL, &ecf);
68 expect_ptr_not_null(ed1, "");
69 expect_zu_eq(ecf_count(&ecf), 0, "");
70 expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
71
72 edata_t *ed2 = edata_cache_fast_get(TSDN_NULL, &ecf);
73 expect_ptr_not_null(ed2, "");
74 expect_zu_eq(ecf_count(&ecf), 0, "");
75 expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
76
77 edata_cache_fast_put(TSDN_NULL, &ecf, ed1);
78 expect_zu_eq(ecf_count(&ecf), 1, "");
79 expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
80
81 edata_cache_fast_put(TSDN_NULL, &ecf, ed2);
82 expect_zu_eq(ecf_count(&ecf), 2, "");
83 expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
84
85 /* LIFO ordering. */
86 expect_ptr_eq(ed2, edata_cache_fast_get(TSDN_NULL, &ecf), "");
87 expect_zu_eq(ecf_count(&ecf), 1, "");
88 expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
89
90 expect_ptr_eq(ed1, edata_cache_fast_get(TSDN_NULL, &ecf), "");
91 expect_zu_eq(ecf_count(&ecf), 0, "");
92 expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
93
94 test_edata_cache_destroy(&ec);
95}
96TEST_END
97
/*
 * Verify the fast cache's batch-fill policy: a fill takes what the
 * fallback cache has (up to EDATA_CACHE_FAST_FILL) and only allocates
 * fresh edatas from the base when the fallback is fully exhausted.
 */
TEST_BEGIN(test_edata_cache_fill) {
	edata_cache_t ec;
	edata_cache_fast_t ecf;

	test_edata_cache_init(&ec);
	edata_cache_fast_init(&ecf, &ec);

	/* Scratch space sized for the largest batch used below. */
	edata_t *allocs[EDATA_CACHE_FAST_FILL * 2];

	/*
	 * If the fallback cache can't satisfy the request, we shouldn't do
	 * extra allocations until compelled to. Put half the fill goal in the
	 * fallback.
	 */
	for (int i = 0; i < EDATA_CACHE_FAST_FILL / 2; i++) {
		allocs[i] = edata_cache_get(TSDN_NULL, &ec);
	}
	for (int i = 0; i < EDATA_CACHE_FAST_FILL / 2; i++) {
		edata_cache_put(TSDN_NULL, &ec, allocs[i]);
	}
	expect_zu_eq(EDATA_CACHE_FAST_FILL / 2,
	    atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");

	/* One returned to the caller; the rest of the half-fill sits in ecf. */
	allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
	expect_zu_eq(EDATA_CACHE_FAST_FILL / 2 - 1, ecf_count(&ecf),
	    "Should have grabbed all edatas available but no more.");

	/* Subsequent gets drain ecf's list one edata at a time. */
	for (int i = 1; i < EDATA_CACHE_FAST_FILL / 2; i++) {
		allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
		expect_ptr_not_null(allocs[i], "");
	}
	expect_zu_eq(0, ecf_count(&ecf), "");

	/* When forced, we should alloc from the base. */
	edata_t *edata = edata_cache_fast_get(TSDN_NULL, &ecf);
	expect_ptr_not_null(edata, "");
	expect_zu_eq(0, ecf_count(&ecf), "Allocated more than necessary");
	expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED),
	    "Allocated more than necessary");

	/*
	 * We should correctly fill in the common case where the fallback isn't
	 * exhausted, too.
	 */
	for (int i = 0; i < EDATA_CACHE_FAST_FILL * 2; i++) {
		allocs[i] = edata_cache_get(TSDN_NULL, &ec);
		expect_ptr_not_null(allocs[i], "");
	}
	for (int i = 0; i < EDATA_CACHE_FAST_FILL * 2; i++) {
		edata_cache_put(TSDN_NULL, &ec, allocs[i]);
	}

	/*
	 * First batch: one full fill moves into ecf, leaving exactly one fill
	 * still in the fallback; draining ecf must not touch the fallback.
	 */
	allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
	expect_zu_eq(EDATA_CACHE_FAST_FILL - 1, ecf_count(&ecf), "");
	expect_zu_eq(EDATA_CACHE_FAST_FILL,
	    atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
	for (int i = 1; i < EDATA_CACHE_FAST_FILL; i++) {
		expect_zu_eq(EDATA_CACHE_FAST_FILL - i, ecf_count(&ecf), "");
		expect_zu_eq(EDATA_CACHE_FAST_FILL,
		    atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
		allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
		expect_ptr_not_null(allocs[i], "");
	}
	expect_zu_eq(0, ecf_count(&ecf), "");
	expect_zu_eq(EDATA_CACHE_FAST_FILL,
	    atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");

	/* Second batch: the refill consumes the rest of the fallback. */
	allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
	expect_zu_eq(EDATA_CACHE_FAST_FILL - 1, ecf_count(&ecf), "");
	expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
	for (int i = 1; i < EDATA_CACHE_FAST_FILL; i++) {
		expect_zu_eq(EDATA_CACHE_FAST_FILL - i, ecf_count(&ecf), "");
		expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
		allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
		expect_ptr_not_null(allocs[i], "");
	}
	expect_zu_eq(0, ecf_count(&ecf), "");
	expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");

	test_edata_cache_destroy(&ec);
}
TEST_END
180
181TEST_BEGIN(test_edata_cache_disable) {
182 edata_cache_t ec;
183 edata_cache_fast_t ecf;
184
185 test_edata_cache_init(&ec);
186 edata_cache_fast_init(&ecf, &ec);
187
188 for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
189 edata_t *edata = edata_cache_get(TSDN_NULL, &ec);
190 expect_ptr_not_null(edata, "");
191 edata_cache_fast_put(TSDN_NULL, &ecf, edata);
192 }
193
194 expect_zu_eq(EDATA_CACHE_FAST_FILL, ecf_count(&ecf), "");
195 expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
196
197 edata_cache_fast_disable(TSDN_NULL, &ecf);
198
199 expect_zu_eq(0, ecf_count(&ecf), "");
200 expect_zu_eq(EDATA_CACHE_FAST_FILL,
201 atomic_load_zu(&ec.count, ATOMIC_RELAXED), "Disabling should flush");
202
203 edata_t *edata = edata_cache_fast_get(TSDN_NULL, &ecf);
204 expect_zu_eq(0, ecf_count(&ecf), "");
205 expect_zu_eq(EDATA_CACHE_FAST_FILL - 1,
206 atomic_load_zu(&ec.count, ATOMIC_RELAXED),
207 "Disabled ecf should forward on get");
208
209 edata_cache_fast_put(TSDN_NULL, &ecf, edata);
210 expect_zu_eq(0, ecf_count(&ecf), "");
211 expect_zu_eq(EDATA_CACHE_FAST_FILL,
212 atomic_load_zu(&ec.count, ATOMIC_RELAXED),
213 "Disabled ecf should forward on put");
214
215 test_edata_cache_destroy(&ec);
216}
217TEST_END
218
/* Entry point: run each test case via the jemalloc test harness. */
int
main(void) {
	return test(
	    test_edata_cache,
	    test_edata_cache_fast_simple,
	    test_edata_cache_fill,
	    test_edata_cache_disable);
}