Make parallel test deterministic
One query in the parallel test did not contain an ORDER BY clause, so its
result set was not deterministic. This patch adds the missing ORDER BY.
jnidzwetzki committed Nov 23, 2023
1 parent 897ef5a commit 8b3227a
Showing 5 changed files with 99 additions and 72 deletions.
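
The change is twofold, as the diffs below show: the subqueries feeding the join gain an ORDER BY so that their LIMIT keeps a stable set of rows, and index scans are temporarily disabled so the planner still chooses a parallel chunk append for the now-ordered query. A minimal sketch of the determinism issue, using the same table and predicate as the affected subquery (illustrative only; the exact rows returned depend on the test data):

-- Without ORDER BY, the LIMIT may keep any 10 qualifying rows, and a parallel
-- plan can hand back a different subset on each run, so the expected output drifts.
SELECT * FROM "test" WHERE i < 500000 LIMIT 10;

-- With ORDER BY, the LIMIT always keeps the same rows (assuming i is unique,
-- as it appears to be in this test's data), so the result is deterministic
-- regardless of the chosen plan.
SELECT * FROM "test" WHERE i < 500000 ORDER BY i LIMIT 10;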
40 changes: 23 additions & 17 deletions test/expected/parallel-13.out
@@ -285,37 +285,42 @@ SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0;
(1 row)

RESET parallel_leader_participation;
-- Test parallel chunk append is used
-- Test parallel chunk append is used (index scan is disabled to trigger a parallel chunk append)
SET parallel_tuple_cost = 0;
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
QUERY PLAN
----------------------------------------------------------------------------------------------------------
SET enable_indexscan = OFF;
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
QUERY PLAN
----------------------------------------------------------------------------------------------
Sort
Sort Key: test.i, _hyper_1_1_chunk.i
-> Hash Right Join
Hash Cond: (_hyper_1_1_chunk.i = test.i)
Sort Key: test.i, _hyper_1_1_chunk_1.i
-> Merge Left Join
Merge Cond: (test.i = _hyper_1_1_chunk_1.i)
-> Limit
-> Gather
-> Gather Merge
Workers Planned: 2
-> Parallel Seq Scan on _hyper_1_1_chunk
Filter: (i < 500000)
-> Hash
-> Limit
-> Gather
Workers Planned: 2
-> Sort
Sort Key: test.i
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_2_chunk
(23 rows)
-> Materialize
-> Limit
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: _hyper_1_1_chunk_1.i
-> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1
Filter: (i < 500000)
(27 rows)

SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
i | j | ts | i | j | ts
----+------+-----------------------------+----+------+-----------------------------
0 | 0.1 | Wed Dec 31 16:00:00 1969 | 0 | 0.1 | Wed Dec 31 16:00:00 1969
@@ -330,6 +335,7 @@ SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1
90 | 90.1 | Wed Dec 31 16:00:00.09 1969 | 90 | 90.1 | Wed Dec 31 16:00:00.09 1969
(10 rows)

SET enable_indexscan = ON;
-- Test normal chunk append can be used in a parallel worker
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE i >= 999000 ORDER BY i) AS t1 JOIN (SELECT * FROM "test" WHERE i >= 400000 ORDER BY i) AS t2 ON (TRUE) ORDER BY t1.i, t2.i LIMIT 10;
QUERY PLAN
40 changes: 23 additions & 17 deletions test/expected/parallel-14.out
@@ -285,37 +285,42 @@ SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0;
(1 row)

RESET parallel_leader_participation;
-- Test parallel chunk append is used
-- Test parallel chunk append is used (index scan is disabled to trigger a parallel chunk append)
SET parallel_tuple_cost = 0;
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
QUERY PLAN
----------------------------------------------------------------------------------------------------------
SET enable_indexscan = OFF;
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
QUERY PLAN
----------------------------------------------------------------------------------------------
Sort
Sort Key: test.i, _hyper_1_1_chunk.i
-> Hash Right Join
Hash Cond: (_hyper_1_1_chunk.i = test.i)
Sort Key: test.i, _hyper_1_1_chunk_1.i
-> Merge Left Join
Merge Cond: (test.i = _hyper_1_1_chunk_1.i)
-> Limit
-> Gather
-> Gather Merge
Workers Planned: 2
-> Parallel Seq Scan on _hyper_1_1_chunk
Filter: (i < 500000)
-> Hash
-> Limit
-> Gather
Workers Planned: 2
-> Sort
Sort Key: test.i
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_2_chunk
(23 rows)
-> Materialize
-> Limit
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: _hyper_1_1_chunk_1.i
-> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1
Filter: (i < 500000)
(27 rows)

SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
i | j | ts | i | j | ts
----+------+-----------------------------+----+------+-----------------------------
0 | 0.1 | Wed Dec 31 16:00:00 1969 | 0 | 0.1 | Wed Dec 31 16:00:00 1969
@@ -330,6 +335,7 @@ SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1
90 | 90.1 | Wed Dec 31 16:00:00.09 1969 | 90 | 90.1 | Wed Dec 31 16:00:00.09 1969
(10 rows)

SET enable_indexscan = ON;
-- Test normal chunk append can be used in a parallel worker
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE i >= 999000 ORDER BY i) AS t1 JOIN (SELECT * FROM "test" WHERE i >= 400000 ORDER BY i) AS t2 ON (TRUE) ORDER BY t1.i, t2.i LIMIT 10;
QUERY PLAN
40 changes: 23 additions & 17 deletions test/expected/parallel-15.out
@@ -286,37 +286,42 @@ SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0;
(1 row)

RESET parallel_leader_participation;
-- Test parallel chunk append is used
-- Test parallel chunk append is used (index scan is disabled to trigger a parallel chunk append)
SET parallel_tuple_cost = 0;
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
QUERY PLAN
----------------------------------------------------------------------------------------------------------
SET enable_indexscan = OFF;
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
QUERY PLAN
----------------------------------------------------------------------------------------------
Sort
Sort Key: test.i, _hyper_1_1_chunk.i
-> Hash Right Join
Hash Cond: (_hyper_1_1_chunk.i = test.i)
Sort Key: test.i, _hyper_1_1_chunk_1.i
-> Merge Left Join
Merge Cond: (test.i = _hyper_1_1_chunk_1.i)
-> Limit
-> Gather
-> Gather Merge
Workers Planned: 2
-> Parallel Seq Scan on _hyper_1_1_chunk
Filter: (i < 500000)
-> Hash
-> Limit
-> Gather
Workers Planned: 2
-> Sort
Sort Key: test.i
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_2_chunk
(23 rows)
-> Materialize
-> Limit
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: _hyper_1_1_chunk_1.i
-> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1
Filter: (i < 500000)
(27 rows)

SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
i | j | ts | i | j | ts
----+------+-----------------------------+----+------+-----------------------------
0 | 0.1 | Wed Dec 31 16:00:00 1969 | 0 | 0.1 | Wed Dec 31 16:00:00 1969
@@ -331,6 +336,7 @@ SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1
90 | 90.1 | Wed Dec 31 16:00:00.09 1969 | 90 | 90.1 | Wed Dec 31 16:00:00.09 1969
(10 rows)

SET enable_indexscan = ON;
-- Test normal chunk append can be used in a parallel worker
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE i >= 999000 ORDER BY i) AS t1 JOIN (SELECT * FROM "test" WHERE i >= 400000 ORDER BY i) AS t2 ON (TRUE) ORDER BY t1.i, t2.i LIMIT 10;
QUERY PLAN
43 changes: 25 additions & 18 deletions test/expected/parallel-16.out
@@ -286,37 +286,43 @@ SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0;
(1 row)

RESET parallel_leader_participation;
-- Test parallel chunk append is used
-- Test parallel chunk append is used (index scan is disabled to trigger a parallel chunk append)
SET parallel_tuple_cost = 0;
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
QUERY PLAN
----------------------------------------------------------------------------------------------------------
Sort
Sort Key: test.i, _hyper_1_1_chunk.i
-> Hash Right Join
Hash Cond: (_hyper_1_1_chunk.i = test.i)
SET enable_indexscan = OFF;
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
QUERY PLAN
----------------------------------------------------------------------------------------------
Incremental Sort
Sort Key: test.i, _hyper_1_1_chunk_1.i
Presorted Key: test.i
-> Merge Left Join
Merge Cond: (test.i = _hyper_1_1_chunk_1.i)
-> Limit
-> Gather
-> Gather Merge
Workers Planned: 2
-> Parallel Seq Scan on _hyper_1_1_chunk
Filter: (i < 500000)
-> Hash
-> Limit
-> Gather
Workers Planned: 2
-> Sort
Sort Key: test.i
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_2_chunk
(23 rows)
-> Materialize
-> Limit
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: _hyper_1_1_chunk_1.i
-> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1
Filter: (i < 500000)
(28 rows)

SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
i | j | ts | i | j | ts
----+------+-----------------------------+----+------+-----------------------------
0 | 0.1 | Wed Dec 31 16:00:00 1969 | 0 | 0.1 | Wed Dec 31 16:00:00 1969
@@ -331,6 +337,7 @@ SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1
90 | 90.1 | Wed Dec 31 16:00:00.09 1969 | 90 | 90.1 | Wed Dec 31 16:00:00.09 1969
(10 rows)

SET enable_indexscan = ON;
-- Test normal chunk append can be used in a parallel worker
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE i >= 999000 ORDER BY i) AS t1 JOIN (SELECT * FROM "test" WHERE i >= 400000 ORDER BY i) AS t2 ON (TRUE) ORDER BY t1.i, t2.i LIMIT 10;
QUERY PLAN
8 changes: 5 additions & 3 deletions test/sql/parallel.sql.in
@@ -77,10 +77,12 @@ SET parallel_leader_participation = off;
SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0;
RESET parallel_leader_participation;

-- Test parallel chunk append is used
-- Test parallel chunk append is used (index scan is disabled to trigger a parallel chunk append)
SET parallel_tuple_cost = 0;
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
SET enable_indexscan = OFF;
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
SET enable_indexscan = ON;

-- Test normal chunk append can be used in a parallel worker
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE i >= 999000 ORDER BY i) AS t1 JOIN (SELECT * FROM "test" WHERE i >= 400000 ORDER BY i) AS t2 ON (TRUE) ORDER BY t1.i, t2.i LIMIT 10;
