Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Disable deadlock permutations in merge chunks tests #7595

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
126 changes: 75 additions & 51 deletions tsl/test/isolation/expected/merge_chunks_concurrent.out
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
unused step name: s3_compress_chunks
Parsed test spec with 4 sessions

starting permutation: s2_show_chunks s3_show_data s1_begin s3_begin s4_modify s2_merge_chunks s1_show_chunks s3_show_chunks s1_show_data s3_show_data s1_commit s1_show_data s3_commit
Expand Down Expand Up @@ -276,10 +277,7 @@ count
(1 row)


starting permutation: s2_set_lock_upgrade s4_wp_enable s2_show_chunks s1_begin s1_show_data s2_merge_chunks s1_show_data s1_row_exclusive_lock s4_wp_release s1_commit s1_show_data s1_show_chunks
step s2_set_lock_upgrade:
set timescaledb.merge_chunks_lock_upgrade_mode='upgrade';

starting permutation: s4_wp_enable s2_show_chunks s1_begin s1_show_data s2_merge_chunks s1_show_data s1_row_exclusive_lock s4_wp_release s1_commit s1_show_data s1_show_chunks
step s4_wp_enable: SELECT debug_waitpoint_enable('merge_chunks_before_heap_swap');
debug_waitpoint_enable
----------------------
Expand Down Expand Up @@ -338,17 +336,15 @@ num_device_all|num_device_1|num_device_5
4| 1| 0
(1 row)

step s1_row_exclusive_lock: call lock_one_chunk('readings'); <waiting ...>
step s1_row_exclusive_lock: call lock_one_chunk('readings');
step s4_wp_release: SELECT debug_waitpoint_release('merge_chunks_before_heap_swap');
debug_waitpoint_release
-----------------------

(1 row)

step s1_row_exclusive_lock: <... completed>
ERROR: deadlock detected
step s2_merge_chunks: <... completed>
step s1_commit: commit;
step s2_merge_chunks: <... completed>
step s1_show_data:
select * from readings order by time desc, device;
select count(*) as num_device_all, count(*) filter (where device=1) as num_device_1, count(*) filter (where device=5) as num_device_5 from readings;
Expand All @@ -373,10 +369,7 @@ count
(1 row)


starting permutation: s2_set_lock_upgrade_conditional s4_wp_enable s2_show_chunks s1_begin s1_show_data s2_merge_chunks s1_show_data s1_row_exclusive_lock s4_wp_release s1_commit s1_show_data s1_show_chunks
step s2_set_lock_upgrade_conditional:
set timescaledb.merge_chunks_lock_upgrade_mode='conditional';

starting permutation: s4_wp_enable s2_show_chunks s1_begin s2_merge_chunks s1_show_data s4_wp_release s1_commit s1_show_data s1_show_chunks
step s4_wp_enable: SELECT debug_waitpoint_enable('merge_chunks_before_heap_swap');
debug_waitpoint_enable
----------------------
Expand All @@ -398,10 +391,21 @@ step s1_begin:
t
(1 row)

step s2_merge_chunks:
call merge_all_chunks('readings');
<waiting ...>
step s1_show_data:
select * from readings order by time desc, device;
select count(*) as num_device_all, count(*) filter (where device=1) as num_device_1, count(*) filter (where device=5) as num_device_5 from readings;
<waiting ...>
step s4_wp_release: SELECT debug_waitpoint_release('merge_chunks_before_heap_swap');
debug_waitpoint_release
-----------------------

(1 row)

step s2_merge_chunks: <... completed>
step s1_show_data: <... completed>
time |device|temp
----------------------------+------+----
Mon Jan 01 02:00:00 2024 PST| 3| 3
Expand All @@ -415,9 +419,7 @@ num_device_all|num_device_1|num_device_5
4| 1| 0
(1 row)

step s2_merge_chunks:
call merge_all_chunks('readings');
<waiting ...>
step s1_commit: commit;
step s1_show_data:
select * from readings order by time desc, device;
select count(*) as num_device_all, count(*) filter (where device=1) as num_device_1, count(*) filter (where device=5) as num_device_5 from readings;
Expand All @@ -435,17 +437,38 @@ num_device_all|num_device_1|num_device_5
4| 1| 0
(1 row)

step s1_row_exclusive_lock: call lock_one_chunk('readings'); <waiting ...>
step s4_wp_release: SELECT debug_waitpoint_release('merge_chunks_before_heap_swap');
debug_waitpoint_release
-----------------------

step s1_show_chunks: select count(*) from show_chunks('readings');
count
-----
1
(1 row)


starting permutation: s2_set_lock_upgrade_conditional s4_wp_enable s2_show_chunks s1_begin s1_show_data s2_merge_chunks s1_show_data s1_row_exclusive_lock s4_wp_release s1_commit s1_show_data s1_show_chunks
step s2_set_lock_upgrade_conditional:
set timescaledb.merge_chunks_lock_upgrade_mode='conditional';

step s4_wp_enable: SELECT debug_waitpoint_enable('merge_chunks_before_heap_swap');
debug_waitpoint_enable
----------------------

(1 row)

step s2_show_chunks: select count(*) from show_chunks('readings');
count
-----
2
(1 row)

step s1_begin:
start transaction isolation level repeatable read;
select count(*) > 0 from pg_class;

?column?
--------
t
(1 row)

step s2_merge_chunks: <... completed>
ERROR: could not lock relation "_hyper_X_X_chunk" for merge
step s1_row_exclusive_lock: <... completed>
step s1_commit: commit;
step s1_show_data:
select * from readings order by time desc, device;
select count(*) as num_device_all, count(*) filter (where device=1) as num_device_1, count(*) filter (where device=5) as num_device_5 from readings;
Expand All @@ -463,35 +486,37 @@ num_device_all|num_device_1|num_device_5
4| 1| 0
(1 row)

step s1_show_chunks: select count(*) from show_chunks('readings');
count
-----
2
(1 row)
step s2_merge_chunks:
call merge_all_chunks('readings');
<waiting ...>
step s1_show_data:
select * from readings order by time desc, device;
select count(*) as num_device_all, count(*) filter (where device=1) as num_device_1, count(*) filter (where device=5) as num_device_5 from readings;

time |device|temp
----------------------------+------+----
Mon Jan 01 02:00:00 2024 PST| 3| 3
Mon Jan 01 02:00:00 2024 PST| 4| 4
Mon Jan 01 01:01:00 2024 PST| 2| 2
Mon Jan 01 01:00:00 2024 PST| 1| 1
(4 rows)

starting permutation: s4_wp_enable s2_merge_chunks s3_merge_chunks s4_wp_release s1_show_data s1_show_chunks
step s4_wp_enable: SELECT debug_waitpoint_enable('merge_chunks_before_heap_swap');
debug_waitpoint_enable
----------------------

num_device_all|num_device_1|num_device_5
--------------+------------+------------
4| 1| 0
(1 row)

step s2_merge_chunks:
call merge_all_chunks('readings');
<waiting ...>
step s3_merge_chunks:
call merge_all_chunks('readings');
<waiting ...>
step s1_row_exclusive_lock: call lock_one_chunk('readings'); <waiting ...>
step s4_wp_release: SELECT debug_waitpoint_release('merge_chunks_before_heap_swap');
debug_waitpoint_release
-----------------------

(1 row)

step s2_merge_chunks: <... completed>
step s3_merge_chunks: <... completed>
ERROR: relation does not exist
ERROR: could not lock relation "_hyper_X_X_chunk" for merge
step s1_row_exclusive_lock: <... completed>
step s1_commit: commit;
step s1_show_data:
select * from readings order by time desc, device;
select count(*) as num_device_all, count(*) filter (where device=1) as num_device_1, count(*) filter (where device=5) as num_device_5 from readings;
Expand All @@ -512,11 +537,11 @@ num_device_all|num_device_1|num_device_5
step s1_show_chunks: select count(*) from show_chunks('readings');
count
-----
1
2
(1 row)


starting permutation: s4_wp_enable s2_merge_chunks s3_compress_chunks s4_wp_release s1_show_data s1_show_chunks
starting permutation: s4_wp_enable s2_merge_chunks s3_merge_chunks s4_wp_release s1_show_data s1_show_chunks
step s4_wp_enable: SELECT debug_waitpoint_enable('merge_chunks_before_heap_swap');
debug_waitpoint_enable
----------------------
Expand All @@ -526,20 +551,22 @@ debug_waitpoint_enable
step s2_merge_chunks:
call merge_all_chunks('readings');
<waiting ...>
step s3_compress_chunks:
select compress_chunk(show_chunks('readings'));
step s3_merge_chunks:
call merge_all_chunks('readings');
<waiting ...>
step s4_wp_release: SELECT debug_waitpoint_release('merge_chunks_before_heap_swap');
debug_waitpoint_release
-----------------------

(1 row)

step s2_merge_chunks: <... completed>
step s3_merge_chunks: <... completed>
ERROR: relation does not exist
step s1_show_data:
select * from readings order by time desc, device;
select count(*) as num_device_all, count(*) filter (where device=1) as num_device_1, count(*) filter (where device=5) as num_device_5 from readings;
<waiting ...>
step s1_show_data: <... completed>

time |device|temp
----------------------------+------+----
Mon Jan 01 02:00:00 2024 PST| 3| 3
Expand All @@ -553,9 +580,6 @@ num_device_all|num_device_1|num_device_5
4| 1| 0
(1 row)

step s2_merge_chunks: <... completed>
step s3_compress_chunks: <... completed>
ERROR: deadlock detected
step s1_show_chunks: select count(*) from show_chunks('readings');
count
-----
Expand Down
25 changes: 20 additions & 5 deletions tsl/test/isolation/specs/merge_chunks_concurrent.spec
Original file line number Diff line number Diff line change
Expand Up @@ -155,9 +155,21 @@ permutation "s2_show_chunks" "s1_begin" "s1_show_data" "s2_merge_chunks" "s1_sho
# before doing the heap swap.
permutation "s2_set_lock_upgrade" "s2_show_chunks" "s1_begin" "s1_show_data" "s2_merge_chunks" "s1_show_data" "s1_commit" "s1_show_data" "s1_show_chunks"

# Same as the above, but it will deadlock because a reader takes a
# heavier lock.
permutation "s2_set_lock_upgrade" "s4_wp_enable" "s2_show_chunks" "s1_begin" "s1_show_data" "s2_merge_chunks" "s1_show_data" "s1_row_exclusive_lock" "s4_wp_release" "s1_commit" "s1_show_data" "s1_show_chunks"
# Same as the above, but it will deadlock because a reader upgrades
# from a read lock to a write lock. Since the permutation deadlocks,
# the output can be non-deterministic (depending on which process is
# killed), so it is not run by default.

#permutation "s2_set_lock_upgrade" "s4_wp_enable" "s2_show_chunks" "s1_begin" "s1_show_data" "s2_merge_chunks" "s1_show_data" "s1_row_exclusive_lock" "s4_wp_release" "s1_commit" "s1_show_data" "s1_show_chunks"

# Same as above but without lock upgrade. No deadlocks, but the merge
# blocks until the reader/writer is finished.

permutation "s4_wp_enable" "s2_show_chunks" "s1_begin" "s1_show_data" "s2_merge_chunks" "s1_show_data" "s1_row_exclusive_lock" "s4_wp_release" "s1_commit" "s1_show_data" "s1_show_chunks"

# Same as above, but the merger takes locks before the reader/writer
# so the reader/writer has to wait.
permutation "s4_wp_enable" "s2_show_chunks" "s1_begin" "s2_merge_chunks" "s1_show_data" "s4_wp_release" "s1_commit" "s1_show_data" "s1_show_chunks"

# Same as above but with a conditional lock. The merge process should
# fail with an error saying it can't take the lock needed for the
Expand All @@ -167,8 +179,11 @@ permutation "s2_set_lock_upgrade_conditional" "s4_wp_enable" "s2_show_chunks" "s
# Test concurrent merges
permutation "s4_wp_enable" "s2_merge_chunks" "s3_merge_chunks" "s4_wp_release" "s1_show_data" "s1_show_chunks"

# Test concurrent compress_chunk()
permutation "s4_wp_enable" "s2_merge_chunks" "s3_compress_chunks" "s4_wp_release" "s1_show_data" "s1_show_chunks"
# Test concurrent compress_chunk(). This will deadlock because
# compress_chunk() takes chunk locks in a different order than the
# merge. The test is disabled because, when a deadlock occurs, the
# output can be non-deterministic (it depends on which process is
# killed to resolve the deadlock).

#permutation "s4_wp_enable" "s2_merge_chunks" "s3_compress_chunks" "s4_wp_release" "s1_show_data" "s1_show_chunks"

# Test concurrent drop table
permutation "s4_wp_enable" "s2_merge_chunks" "s3_drop_chunks" "s4_wp_release" "s1_show_data" "s1_show_chunks"
Loading