aboutsummaryrefslogtreecommitdiff
path: root/examples/redis-unstable/tests/integration/rdb.tcl
diff options
context:
space:
mode:
Diffstat (limited to 'examples/redis-unstable/tests/integration/rdb.tcl')
-rw-r--r--examples/redis-unstable/tests/integration/rdb.tcl681
1 files changed, 681 insertions, 0 deletions
diff --git a/examples/redis-unstable/tests/integration/rdb.tcl b/examples/redis-unstable/tests/integration/rdb.tcl
new file mode 100644
index 0000000..1daebde
--- /dev/null
+++ b/examples/redis-unstable/tests/integration/rdb.tcl
@@ -0,0 +1,681 @@
tags {"rdb external:skip"} {

# Working directory for the encoding-related tests below.
set server_path [tmpdir "server.rdb-encoding-test"]

# Place the pre-built RDB fixtures (various value encodings) in the server dir.
exec cp tests/assets/encodings.rdb $server_path
exec cp tests/assets/list-quicklist.rdb $server_path
start_server [list overrides [list "dir" $server_path "dbfilename" "list-quicklist.rdb" save ""]] {
    test "test old version rdb file" {
        # The fixture was produced by an older server version; verify it
        # loads and the list is carried over to the current encoding.
        r select 0
        assert_equal [r get x] 7
        assert_encoding listpack list
        r lpop list
    } {7}
}
17
start_server [list overrides [list "dir" $server_path "dbfilename" "encodings.rdb"]] {
    test "RDB encoding loading test" {
        # Dump the whole keyspace and compare against the known content of
        # the encodings.rdb fixture (covers every value encoding).
        r select 0
        csvdump r
    } {"0","compressible","string","aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
"0","hash","hash","a","1","aa","10","aaa","100","b","2","bb","20","bbb","200","c","3","cc","30","ccc","300","ddd","400","eee","5000000000",
"0","hash_zipped","hash","a","1","b","2","c","3",
"0","list","list","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000",
"0","list_zipped","list","1","2","3","a","b","c","100000","6000000000",
"0","number","string","10"
"0","set","set","1","100000","2","3","6000000000","a","b","c",
"0","set_zipped_1","set","1","2","3","4",
"0","set_zipped_2","set","100000","200000","300000","400000",
"0","set_zipped_3","set","1000000000","2000000000","3000000000","4000000000","5000000000","6000000000",
"0","string","string","Hello World"
"0","zset","zset","a","1","b","2","c","3","aa","10","bb","20","cc","30","aaa","100","bbb","200","ccc","300","aaaa","1000","cccc","123456789","bbbb","5000000000",
"0","zset_zipped","zset","a","1","b","2","c","3",
}
}
37
set server_path [tmpdir "server.rdb-startup-test"]

start_server [list overrides [list "dir" $server_path] keep_persistence true] {
    # With no RDB file on disk the server must come up with an empty dataset.
    test {Server started empty with non-existing RDB file} {
        debug_digest
    } {0000000000000000000000000000000000000000}
    # Save an RDB file, needed for the next test.
    r save
}

start_server [list overrides [list "dir" $server_path] keep_persistence true] {
    # Loading the empty RDB saved above must also yield an empty dataset.
    test {Server started empty with empty RDB file} {
        debug_digest
    } {0000000000000000000000000000000000000000}
}
53
start_server [list overrides [list "dir" $server_path] keep_persistence true] {
    test {Test RDB stream encoding} {
        # Build a stream whose entries mostly repeat the same field so the
        # listpack "master entry" compression is exercised, with occasional
        # divergent entries mixed in.
        for {set i 0} {$i < 1000} {incr i} {
            if {rand() < 0.9} {
                r xadd stream * foo abc
            } else {
                r xadd stream * bar $i
            }
        }
        r xgroup create stream mygroup 0
        set records [r xreadgroup GROUP mygroup Alice COUNT 2 STREAMS stream >]
        # Delete the second delivered entry and ACK the first one so the
        # PEL and the stream content diverge before the reload.
        r xdel stream [lindex [lindex [lindex [lindex $records 0] 1] 1] 0]
        r xack stream mygroup [lindex [lindex [lindex [lindex $records 0] 1] 0] 0]
        set digest [debug_digest]
        r config set sanitize-dump-payload no
        r debug reload
        set newdigest [debug_digest]
        assert {$digest eq $newdigest}
    }
    test {Test RDB stream encoding - sanitize dump} {
        # Same reload check, but with deep payload sanitization enabled.
        # $digest is still visible here (test bodies share the caller scope).
        r config set sanitize-dump-payload yes
        r debug reload
        set newdigest [debug_digest]
        assert {$digest eq $newdigest}
    }
    # delete the stream, maybe valgrind will find something
    r del stream
}
82
# Helper: start a server with the given config overrides, run the caller's
# code, then unconditionally kill the server. Used to inspect the error log
# of servers that are expected to abort during startup.
set defaults {}
proc start_server_and_kill_it {overrides code} {
    upvar defaults defaults srv srv server_path server_path
    set cfg [concat $defaults $overrides]
    set srv [start_server [list overrides $cfg keep_persistence true]]
    uplevel 1 $code
    kill_server $srv
}
93
# Make the RDB file unreadable
file attributes [file join $server_path dump.rdb] -permissions 0222

# Detect root account (it is able to read the file even with 0222 perm).
# Fix: the original leaked the channel returned by [open]; close it so the
# test suite doesn't accumulate open file descriptors.
set isroot 0
catch {
    set checkfd [open [file join $server_path dump.rdb]]
    close $checkfd
    set isroot 1
}

# Now make sure the server aborted with an error
if {!$isroot} {
    start_server_and_kill_it [list "dir" $server_path] {
        test {Server should not start if RDB file can't be open} {
            wait_for_condition 50 100 {
                [string match {*Fatal error loading*} \
                    [exec tail -1 < [dict get $srv stdout]]]
            } else {
                fail "Server started even if RDB was unreadable!"
            }
        }
    }
}
117
# Fix permissions of the RDB file.
file attributes [file join $server_path dump.rdb] -permissions 0666

# Corrupt the trailing CRC64 checksum (the last 8 bytes of the file).
# Note: the original also computed [file size] into an unused variable;
# seeking with "-8 end" makes the size lookup unnecessary.
set fd [open [file join $server_path dump.rdb] r+]
fconfigure $fd -translation binary
seek $fd -8 end
puts -nonewline $fd "foobar00"; # Corrupt the checksum
close $fd

# Now make sure the server aborted with an error
start_server_and_kill_it [list "dir" $server_path] {
    test {Server should not start if RDB is corrupted} {
        wait_for_condition 50 100 {
            [string match {*CRC error*} \
                [exec tail -10 < [dict get $srv stdout]]]
        } else {
            fail "Server started even if RDB was corrupted!"
        }
    }
}
140
start_server {} {
    test {Test FLUSHALL aborts bgsave} {
        r config set save ""
        # 5000 keys with a 1ms delay per key makes the bgsave take ~5 seconds
        r config set rdb-key-save-delay 1000
        populate 5000
        assert_lessthan 999 [s rdb_changes_since_last_save]
        r bgsave
        assert_equal [s rdb_bgsave_in_progress] 1
        r flushall
        # wait at most one second (the bgsave alone would need ~5)
        wait_for_condition 10 100 {
            [s rdb_bgsave_in_progress] == 0
        } else {
            fail "bgsave not aborted"
        }
        # the aborted bgsave must not have reset the dirty counter
        assert_lessthan 999 [s rdb_changes_since_last_save]
        # make sure the server is still writable
        r set x xx
    }

    test {bgsave resets the change counter} {
        r config set rdb-key-save-delay 0
        r bgsave
        wait_for_condition 50 100 {
            [s rdb_bgsave_in_progress] == 0
        } else {
            fail "bgsave not done"
        }
        assert_equal [s rdb_changes_since_last_save] 0
    }
}
174
test {client freed during loading} {
    start_server [list overrides [list key-load-delay 50 loading-process-events-interval-bytes 1024 rdbcompression no save "900 1"]] {
        # create a big rdb that will take long to load. it is important
        # for keys to be big since the server processes events only once in 2mb.
        # 100mb of rdb, 100k keys will load in more than 5 seconds
        r debug populate 100000 key 1000

        restart_server 0 false false

        # the server must still be in the loading phase
        assert_equal [s loading] 1

        # connect five deferring clients, have each issue a command,
        # collect the replies, then disconnect them all
        set conns {}
        for {set i 0} {$i < 5} {incr i} {
            lappend conns [redis_deferring_client]
        }
        foreach conn $conns {
            $conn debug log bla
        }
        foreach conn $conns {
            $conn read
        }
        foreach conn $conns {
            $conn close
        }

        # make sure the server freed the clients
        wait_for_condition 100 100 {
            [s connected_clients] < 3
        } else {
            fail "clients didn't disconnect"
        }

        # loading must still be in progress
        assert_equal [s loading] 1

        # no need to keep waiting for loading to complete
        exec kill [srv 0 pid]
    }
}
216
start_server {} {
    test {Test RDB load info} {
        r debug populate 1000
        r save
        assert {[r lastsave] <= [lindex [r time] 0]}
        restart_server 0 true false
        wait_done_loading r
        assert {[s rdb_last_load_keys_expired] == 0}
        assert {[s rdb_last_load_keys_loaded] == 1000}

        # Spread 1024 short-lived keys over all 16 databases, let them
        # expire, then check the load-time expiration stats after a restart.
        r debug set-active-expire 0
        for {set i 0} {$i < 1024} {incr i} {
            r select [expr {$i % 16}]
            r set $i somevalue px 10
        }
        after 20

        r save
        restart_server 0 true false
        wait_done_loading r
        assert {[s rdb_last_load_keys_expired] == 1024}
        assert {[s rdb_last_load_keys_loaded] == 1000}
    }
}
241
# Our COW metrics (Private_Dirty) work only on Linux
set system_name [string tolower [exec uname -s]]
set page_size [exec getconf PAGESIZE]
if {$system_name eq {linux} && $page_size == 4096} {

start_server {overrides {save ""}} {
    test {Test child sending info} {
        # A fresh server must report zeroed save/COW stats, otherwise the
        # comparisons made during this test would be meaningless.
        assert {[s current_cow_size] == 0}
        assert {[s current_save_keys_processed] == 0}
        assert {[s current_save_keys_total] == 0}

        assert {[s rdb_last_cow_size] == 0}

        # using a 200us delay, the bgsave is empirically taking about 10 seconds.
        # we need it to take more than some 5 seconds, since redis only report COW once a second.
        r config set rdb-key-save-delay 200
        r config set loglevel debug

        # populate the db with 10k keys of 512B each (since we want to measure the COW size by
        # changing some keys and read the reported COW size, we are using small key size to prevent from
        # the "dismiss mechanism" free memory and reduce the COW size)
        set writer [redis_deferring_client 0]
        set size 500 ;# aim for the 512 bin (sds overhead)
        set cmd_count 10000
        for {set idx 0} {$idx < $cmd_count} {incr idx} {
            $writer set key$idx [string repeat A $size]
        }
        for {set idx 0} {$idx < $cmd_count} {incr idx} {
            catch { $writer read }
        }
        $writer close

        # start background rdb save
        r bgsave

        set current_save_keys_total [s current_save_keys_total]
        if {$::verbose} {
            puts "Keys before bgsave start: $current_save_keys_total"
        }

        # on each iteration, we will write some key to the server to trigger copy-on-write, and
        # wait to see that it reflected in INFO.
        set iteration 1
        set key_idx 0
        while 1 {
            # take samples before writing new data to the server
            set cow_size [s current_cow_size]
            if {$::verbose} {
                puts "COW info before copy-on-write: $cow_size"
            }

            set keys_processed [s current_save_keys_processed]
            if {$::verbose} {
                puts "current_save_keys_processed info : $keys_processed"
            }

            # trigger copy-on-write
            set modified_keys 16
            for {set idx 0} {$idx < $modified_keys} {incr idx} {
                r setrange key$key_idx 0 [string repeat B $size]
                incr key_idx
            }

            # changing 16 keys (512B each) will create at least 8192 COW (2 pages), but we don't want the test
            # to be too strict, so we check for a change of at least 4096 bytes
            set exp_cow [expr {$cow_size + 4096}]
            # wait to see that current_cow_size value updated (as long as the child is in progress)
            wait_for_condition 80 100 {
                [s rdb_bgsave_in_progress] == 0 ||
                [s current_cow_size] >= $exp_cow &&
                [s current_save_keys_processed] > $keys_processed &&
                [s current_fork_perc] > 0
            } else {
                if {$::verbose} {
                    puts "COW info on fail: [s current_cow_size]"
                    puts [exec tail -n 100 < [srv 0 stdout]]
                }
                fail "COW info wasn't reported"
            }

            # the processed count must never exceed the total key count
            assert_morethan_equal $current_save_keys_total $keys_processed

            # in non-accurate mode two iterations are enough
            if {!$::accurate && $iteration == 2} {
                break
            }

            # stop iterating if the bgsave completed
            if {[s rdb_bgsave_in_progress] == 0} {
                break
            }

            incr iteration
        }

        # make sure we saw report of current_cow_size
        if {$iteration < 2 && $::verbose} {
            puts [exec tail -n 100 < [srv 0 stdout]]
        }
        assert_morethan_equal $iteration 2

        # if bgsave completed, check that rdb_last_cow_size (fork exit report)
        # is at least 90% of last rdb_active_cow_size.
        if {[s rdb_bgsave_in_progress] == 0} {
            set final_cow [s rdb_last_cow_size]
            set cow_size [expr {$cow_size * 0.9}]
            if {$final_cow < $cow_size && $::verbose} {
                puts [exec tail -n 100 < [srv 0 stdout]]
            }
            assert_morethan_equal $final_cow $cow_size
        }
    }
}
} ;# system_name
361
exec cp -f tests/assets/scriptbackup.rdb $server_path
start_server [list overrides [list "dir" $server_path "dbfilename" "scriptbackup.rdb" "appendonly" "no"]] {
    # The script saved in the fixture is: return redis.call('set', 'foo', 'bar')
    # and its sha1 is a0c38691e9fffe4563723c32ba77a34398e090e6.
    # Verify the script cache is NOT repopulated from the RDB.
    test {script won't load anymore if it's in rdb} {
        assert_equal [r script exists a0c38691e9fffe4563723c32ba77a34398e090e6] 0
    }
}
370
start_server {} {
    test "failed bgsave prevents writes" {
        # Make sure the server saves an RDB on shutdown
        r config set save "900 1"

        # Start a deliberately slow bgsave and kill the child, so the last
        # save is recorded as failed and write commands get rejected.
        r config set rdb-key-save-delay 10000000
        populate 1000
        r set x x
        r bgsave
        set child [get_child_pid 0]
        catch {exec kill -9 $child}
        waitForBgsave r

        # reads keep working
        assert_equal [r get x] x

        # writes are rejected with MISCONF
        assert_error {MISCONF *} {r set x y}

        # repeat with a script
        assert_error {MISCONF *} {r eval {
            return redis.call('set','x',1)
        } 1 x
        }
        assert_equal {x} [r eval {
            return redis.call('get','x')
        } 1 x
        ]

        # again with script using shebang
        assert_error {MISCONF *} {r eval {#!lua
            return redis.call('set','x',1)
        } 1 x
        }
        assert_equal {x} [r eval {#!lua flags=no-writes
            return redis.call('get','x')
        } 1 x
        ]

        # a successful bgsave clears the failure state
        r config set rdb-key-save-delay 0
        r bgsave
        waitForBgsave r

        # server is writable again
        r set x y
    } {OK}
}
418
start_server {overrides {save "900 1"}} {
    test "rdb_saves_consecutive_failures metric" {
        assert_equal [s rdb_saves_consecutive_failures] 0

        # First failure: kill the bgsave child mid-save.
        r config set rdb-key-save-delay 10000000
        populate 100
        r bgsave
        set child [get_child_pid 0]
        catch {exec kill -9 $child}
        waitForBgsave r

        assert_equal [s rdb_saves_consecutive_failures] 1

        # Second consecutive failure increments the counter again.
        r bgsave
        set child [get_child_pid 0]
        catch {exec kill -9 $child}
        waitForBgsave r

        assert_equal [s rdb_saves_consecutive_failures] 2

        # A successful bgsave resets the counter back to zero.
        r config set rdb-key-save-delay 0
        r bgsave
        waitForBgsave r

        assert_equal [s rdb_saves_consecutive_failures] 0
    }
}
450
set server_path [tmpdir "server.partial-hfield-exp-test"]

# verifies writing and reading hash key with expiring and persistent fields
start_server [list overrides [list "dir" $server_path]] {
    foreach {type lp_entries} {listpack 512 dict 0} {
        test "HFE - save and load expired fields, expired soon after, or long after ($type)" {
            r config set hash-max-listpack-entries $lp_entries

            r FLUSHALL

            r HMSET key a 1 b 2 c 3 d 4 e 5
            # 'a': expires long after the restart
            r HEXPIREAT key 2524600800 FIELDS 1 a
            # 'b': a long TTL value (46 bits) must round-trip through the RDB
            r HPEXPIREAT key 65755674080852 FIELDS 1 b
            # 'd': already expired by the time the server restarts
            r HPEXPIRE key 80 FIELDS 1 d
            # 'e': expires shortly after the restart
            r HPEXPIRE key 200 FIELDS 1 e

            r save
            # sleep 101 ms to make sure d will expire after restart
            after 101
            restart_server 0 true false
            wait_done_loading r

            # Never be sure when active-expire kicks in into action
            wait_for_condition 100 10 {
                [lsort [r hgetall key]] == "1 2 3 a b c"
            } else {
                fail "hgetall of key is not as expected"
            }

            assert_equal [r hpexpiretime key FIELDS 3 a b c] {2524600800000 65755674080852 -1}
            assert_equal [s rdb_last_load_keys_loaded] 1

            # wait until expired_subkeys equals 2
            wait_for_condition 10 100 {
                [s expired_subkeys] == 2
            } else {
                fail "Value of expired_subkeys is not as expected"
            }
        }
    }
}
496
set server_path [tmpdir "server.all-hfield-exp-test"]

# verifies writing hash with several expired keys, and active-expiring it on load
start_server [list overrides [list "dir" $server_path]] {
    foreach {type lp_entries} {listpack 512 dict 0} {
        test "HFE - save and load rdb all fields expired, ($type)" {
            r config set hash-max-listpack-entries $lp_entries

            r FLUSHALL

            r HMSET key a 1 b 2 c 3 d 4
            r HPEXPIRE key 100 FIELDS 4 a b c d

            r save
            # sleep 101 ms so every field is expired by the time we restart
            after 101

            restart_server 0 true false
            wait_done_loading r

            # no field is expired at load time: the key is loaded whole even
            # though all of its fields are already logically expired
            assert_equal [s rdb_last_load_keys_loaded] 1

            assert_equal [r hgetall key] {}
        }
    }
}
525
set server_path [tmpdir "server.listpack-to-dict-test"]

test "save listpack, load dict" {
    start_server [list overrides [list "dir" $server_path enable-debug-command yes]] {
        r config set hash-max-listpack-entries 512

        r FLUSHALL

        r HMSET key a 1 b 2 c 3 d 4
        assert_match "*encoding:listpack*" [r debug object key]
        r HPEXPIRE key 100 FIELDS 1 d
        r save

        # sleep 200 ms so 'd' is already expired when reloading
        after 200

        # reload under a config that forces dict (hashtable) encoding
        r config set hash-max-listpack-entries 0
        r debug reload nosave

        # first verify d was not expired during load (no expiry when loading
        # a hash that was saved listpack-encoded)
        assert_equal [s rdb_last_load_keys_loaded] 1

        # d should be lazy expired in hgetall
        assert_equal [lsort [r hgetall key]] "1 2 3 a b c"
        assert_match "*encoding:hashtable*" [r debug object key]
    }
}
555
set server_path [tmpdir "server.dict-to-listpack-test"]

test "save dict, load listpack" {
    start_server [list overrides [list "dir" $server_path enable-debug-command yes]] {
        r config set hash-max-listpack-entries 0

        r FLUSHALL

        r HMSET key a 1 b 2 c 3 d 4
        assert_match "*encoding:hashtable*" [r debug object key]
        r HPEXPIRE key 200 FIELDS 1 d
        r save

        # sleep 201 ms so 'd' is already expired when the RDB is reloaded
        after 201

        # reload under a config that allows listpack encoding again
        r config set hash-max-listpack-entries 512
        r debug reload nosave

        # verify d was expired during load
        assert_equal [s rdb_last_load_keys_loaded] 1

        assert_equal [lsort [r hgetall key]] "1 2 3 a b c"
        assert_match "*encoding:listpack*" [r debug object key]
    }
}
583
set server_path [tmpdir "server.active-expiry-after-load"]

# verifies a field is correctly expired by active expiry AFTER loading from RDB
foreach {type lp_entries} {listpack 512 dict 0} {
    start_server [list overrides [list "dir" $server_path enable-debug-command yes]] {
        test "active field expiry after load, ($type)" {
            r config set hash-max-listpack-entries $lp_entries

            r FLUSHALL

            r HMSET key a 1 b 2 c 3 d 4 e 5 f 6
            r HEXPIREAT key 2524600800 FIELDS 2 a b
            r HPEXPIRE key 200 FIELDS 2 c d

            r save
            r debug reload nosave

            # wait at most 2 secs to make sure 'c' and 'd' will active-expire
            wait_for_condition 20 100 {
                [s expired_subkeys] == 2
            } else {
                fail "expired hash fields is [s expired_subkeys] != 2"
            }

            assert_equal [s rdb_last_load_keys_loaded] 1

            # hgetall might lazy expire fields, so it's only called after the stat asserts
            assert_equal [lsort [r hgetall key]] "1 2 5 6 a b e f"
            assert_equal [r hexpiretime key FIELDS 6 a b c d e f] {2524600800 2524600800 -2 -2 -1 -1}
        }
    }
}
616
set server_path [tmpdir "server.lazy-expiry-after-load"]

# verifies fields are lazily expired after load when active expiry is disabled
foreach {type lp_entries} {listpack 512 dict 0} {
    start_server [list overrides [list "dir" $server_path enable-debug-command yes]] {
        test "lazy field expiry after load, ($type)" {
            r config set hash-max-listpack-entries $lp_entries
            # disable active expiry so only lazy expiration can kick in
            r debug set-active-expire 0

            r FLUSHALL

            r HMSET key a 1 b 2 c 3 d 4 e 5 f 6
            r HEXPIREAT key 2524600800 FIELDS 2 a b
            r HPEXPIRE key 200 FIELDS 2 c d

            r save
            r debug reload nosave

            # sleep 500 msec to make sure 'c' and 'd' will lazy-expire when calling hgetall
            after 500

            assert_equal [s rdb_last_load_keys_loaded] 1
            assert_equal [s expired_subkeys] 0

            # hgetall will lazy expire fields, so it's only called after the stat asserts
            assert_equal [lsort [r hgetall key]] "1 2 5 6 a b e f"
            assert_equal [r hexpiretime key FIELDS 6 a b c d e f] {2524600800 2524600800 -2 -2 -1 -1}
        }
    }
}
646
set server_path [tmpdir "server.unexpired-items-rax-list-boundary"]

foreach {type lp_entries} {listpack 512 dict 0} {
    start_server [list overrides [list "dir" $server_path enable-debug-command yes]] {
        test "load un-expired items below and above rax-list boundary, ($type)" {
            r config set hash-max-listpack-entries $lp_entries

            r flushall

            # hash sizes straddle the internal representation boundaries
            # around 16 and 32 entries
            set hash_sizes {15 16 17 31 32 33}
            foreach n $hash_sizes {
                for {set f 1} {$f <= $n} {incr f} {
                    r hset key$n f$f v$f
                    r hexpireat key$n 2524600800 FIELDS 1 f$f
                }
            }

            r save

            restart_server 0 true false
            wait_done_loading r

            # after the reload every field must survive with its TTL intact
            foreach n $hash_sizes {
                for {set f 1} {$f <= $n} {incr f} {
                    assert_equal [r hget key$n f$f] v$f
                    assert_equal [r hexpiretime key$n FIELDS 1 f$f] 2524600800
                }
            }
        }
    }
}
680
681} ;# tags