diff --git a/.unreleased/pr_6360 b/.unreleased/pr_6360 new file mode 100644 index 00000000000..5ea6a104de4 --- /dev/null +++ b/.unreleased/pr_6360 @@ -0,0 +1,3 @@ +Implements: #6360 Remove support for creating Continuous Aggregates with the old format + +Thanks: @pdipesh02 for working on removing the old Continuous Aggregate format diff --git a/scripts/test_functions.inc b/scripts/test_functions.inc index 8e9127b33a1..ae8063aa81b 100644 --- a/scripts/test_functions.inc +++ b/scripts/test_functions.inc @@ -5,7 +5,7 @@ SCRIPT_DIR=$(dirname $0) # Run tests given as arguments. # # Options: -# -r Run repair tests as a separate pass (optional) +# -r Run repair tests (optional) # -k Keep temporary directory # -vN Use version N of the update tests (required) run_tests() ( @@ -17,24 +17,28 @@ run_tests() ( while getopts "kv:r" opt; do case $opt in - v) - TEST_VERSION=v$OPTARG - ;; - k) - DO_CLEANUP=false - ;; - r) - TEST_REPAIR=true - ;; + v) + TEST_VERSION=v$OPTARG + ;; + k) + DO_CLEANUP=false + ;; + r) + TEST_REPAIR=true + ;; + *) + exit 1 + ;; esac done shift $((OPTIND-1)) export TAGS="$@" - bash ${SCRIPT_DIR}/test_updates.sh if [[ "$TEST_REPAIR" = "true" ]]; then bash ${SCRIPT_DIR}/test_repairs.sh + else + bash ${SCRIPT_DIR}/test_updates.sh fi EXIT_CODE=$? if [ $EXIT_CODE -ne 0 ]; then diff --git a/scripts/test_updates_pg13.sh b/scripts/test_updates_pg13.sh index f73dfd15912..d965a043d19 100755 --- a/scripts/test_updates_pg13.sh +++ b/scripts/test_updates_pg13.sh @@ -10,11 +10,16 @@ source ${SCRIPT_DIR}/test_functions.inc run_tests "$@" -v7 \ 2.1.0-pg13 2.1.1-pg13 2.2.0-pg13 2.2.1-pg13 2.3.0-pg13 2.3.1-pg13 \ 2.4.0-pg13 2.4.1-pg13 2.4.2-pg13 + run_tests "$@" -v8 \ 2.5.0-pg13 2.5.1-pg13 2.5.2-pg13 2.6.0-pg13 2.6.1-pg13 2.7.0-pg13 2.7.1-pg13 2.7.2-pg13 \ 2.8.0-pg13 2.8.1-pg13 2.9.0-pg13 2.9.1-pg13 2.9.2-pg13 2.9.3-pg13 -# Also run repair tests for >=2.10.x versions due to PR #5441 +run_tests "$@" -v8 \ + 2.10.0-pg13 2.10.1-pg13 2.10.2-pg13 2.10.3-pg13 2.11.0-pg13 2.11.1-pg13 2.11.2-pg13 \ + 2.12.0-pg13 2.12.1-pg13 2.12.2-pg13 2.13.0-pg13 + +# Run repair tests for >=2.10.x versions due to PR #5441 run_tests "$@" -r -v8 \ 2.10.0-pg13 2.10.1-pg13 2.10.2-pg13 2.10.3-pg13 2.11.0-pg13 2.11.1-pg13 2.11.2-pg13 \ 2.12.0-pg13 2.12.1-pg13 2.12.2-pg13 2.13.0-pg13 diff --git a/scripts/test_updates_pg14.sh b/scripts/test_updates_pg14.sh index 6098c744207..13f04853b1d 100755 --- a/scripts/test_updates_pg14.sh +++ b/scripts/test_updates_pg14.sh @@ -9,11 +9,16 @@ source ${SCRIPT_DIR}/test_functions.inc run_tests "$@" -v7 \ 2.5.0-pg14 2.5.1-pg14 + run_tests "$@" -v8 \ 2.5.0-pg14 2.5.1-pg14 2.5.2-pg14 2.6.0-pg14 2.6.1-pg14 2.7.0-pg14 2.7.1-pg14 2.7.2-pg14 \ 2.8.0-pg14 2.8.1-pg14 2.9.0-pg14 2.9.1-pg14 2.9.2-pg14 2.9.3-pg14 -# Also run repair tests for >=2.10.x versions due to PR #5441 +run_tests "$@" -v8 \ + 2.10.0-pg14 2.10.1-pg14 2.10.2-pg14 2.10.3-pg14 2.11.0-pg14 2.11.1-pg14 2.11.2-pg14 \ + 2.12.0-pg14 2.12.1-pg14 2.12.2-pg14 2.13.0-pg14 + +# Run repair tests for >=2.10.x versions due to PR #5441 run_tests "$@" -r -v8 \ 2.10.0-pg14 2.10.1-pg14 2.10.2-pg14 2.10.3-pg14 2.11.0-pg14 2.11.1-pg14 2.11.2-pg14 \ 2.12.0-pg14 2.12.1-pg14 2.12.2-pg14 2.13.0-pg14 diff --git a/scripts/test_updates_pg15.sh b/scripts/test_updates_pg15.sh index e39db259875..2553a8f7f17 100755 --- a/scripts/test_updates_pg15.sh +++ b/scripts/test_updates_pg15.sh @@ -10,7 +10,11 @@ source ${SCRIPT_DIR}/test_functions.inc run_tests "$@" -v8 \ 2.9.0-pg15 2.9.1-pg15 2.9.2-pg15 2.9.3-pg15 -# Also run repair tests for >=2.10.x versions due to PR #5441
+run_tests "$@" -v8 \ + 2.10.0-pg15 2.10.1-pg15 2.10.2-pg15 2.10.3-pg15 2.11.0-pg15 2.11.1-pg15 \ + 2.11.2-pg15 2.12.0-pg15 2.12.1-pg15 2.12.2-pg15 2.13.0-pg15 + +# Run repair tests for >=2.10.x versions due to PR #5441 run_tests "$@" -r -v8 \ 2.10.0-pg15 2.10.1-pg15 2.10.2-pg15 2.10.3-pg15 2.11.0-pg15 2.11.1-pg15 \ 2.11.2-pg15 2.12.0-pg15 2.12.1-pg15 2.12.2-pg15 2.13.0-pg15 diff --git a/scripts/test_updates_pg16.sh b/scripts/test_updates_pg16.sh index 73792aaa411..c47dbccc59f 100755 --- a/scripts/test_updates_pg16.sh +++ b/scripts/test_updates_pg16.sh @@ -7,7 +7,10 @@ SCRIPT_DIR=$(dirname $0) # shellcheck source=scripts/test_functions.inc source ${SCRIPT_DIR}/test_functions.inc -# Also run repair tests for >=2.10.x versions due to PR #5441 +run_tests "$@" -v8 \ + 2.13.0-pg16 + +# Run repair tests for >=2.10.x versions due to PR #5441 run_tests "$@" -r -v8 \ 2.13.0-pg16 diff --git a/sql/updates/post-update.sql b/sql/updates/post-update.sql index 5e211828bf2..aff81b21760 100644 --- a/sql/updates/post-update.sql +++ b/sql/updates/post-update.sql @@ -1,67 +1,50 @@ -- needed post 1.7.0 to fixup continuous aggregates created in 1.7.0 --- -DO $$ +DO +$$ DECLARE vname regclass; + mat_ht_id INTEGER; materialized_only bool; - ts_version TEXT; + finalized bool; + ts_major INTEGER; + ts_minor INTEGER; BEGIN - SELECT extversion INTO ts_version FROM pg_extension WHERE extname = 'timescaledb'; - IF ts_version >= '2.7.0' THEN - CREATE PROCEDURE _timescaledb_internal.post_update_cagg_try_repair( - cagg_view REGCLASS, force_rebuild boolean - ) AS '@MODULE_PATHNAME@', 'ts_cagg_try_repair' LANGUAGE C; + -- procedures with a SET clause cannot execute transaction + -- control, so we adjust search_path in the procedure body + SET LOCAL search_path TO pg_catalog, pg_temp; + + SELECT ((string_to_array(extversion,'.'))[1])::int, ((string_to_array(extversion,'.'))[2])::int + INTO ts_major, ts_minor + FROM pg_extension WHERE extname = 'timescaledb'; + + IF ts_major >= 2 AND ts_minor >= 7 THEN + CREATE PROCEDURE _timescaledb_functions.post_update_cagg_try_repair( + cagg_view REGCLASS, force_rebuild BOOLEAN + ) AS '@MODULE_PATHNAME@', 'ts_cagg_try_repair' LANGUAGE C; END IF; - FOR vname, materialized_only IN select format('%I.%I', cagg.user_view_schema, cagg.user_view_name)::regclass, cagg.materialized_only from _timescaledb_catalog.continuous_agg cagg + + FOR vname, mat_ht_id, materialized_only, finalized IN + SELECT format('%I.%I', cagg.user_view_schema, cagg.user_view_name)::regclass, cagg.mat_hypertable_id, cagg.materialized_only, cagg.finalized + FROM _timescaledb_catalog.continuous_agg cagg LOOP - -- the cast from oid to text returns - -- quote_qualified_identifier (see regclassout). - -- - -- We use the if statement to handle pre-2.0 as well as - -- post-2.0. This could be turned into a procedure if we want - -- to have something more generic, but right now it is just - -- this case.
- IF ts_version < '2.0.0' THEN - EXECUTE format('ALTER VIEW %s SET (timescaledb.materialized_only=%L) ', vname::text, materialized_only); - ELSIF ts_version < '2.7.0' THEN - EXECUTE format('ALTER MATERIALIZED VIEW %s SET (timescaledb.materialized_only=%L) ', vname::text, materialized_only); - ELSE - SET log_error_verbosity TO VERBOSE; - CALL _timescaledb_internal.post_update_cagg_try_repair(vname, false); - END IF; + IF ts_major < 2 THEN + EXECUTE format('ALTER VIEW %s SET (timescaledb.materialized_only=%L) ', vname::text, materialized_only); + + ELSIF ts_major = 2 AND ts_minor < 7 THEN + EXECUTE format('ALTER MATERIALIZED VIEW %s SET (timescaledb.materialized_only=%L) ', vname::text, materialized_only); + + ELSIF ts_major = 2 AND ts_minor >= 7 THEN + SET log_error_verbosity TO VERBOSE; + CALL _timescaledb_functions.post_update_cagg_try_repair(vname, false); + + END IF; END LOOP; - IF ts_version >= '2.7.0' THEN - DROP PROCEDURE IF EXISTS _timescaledb_internal.post_update_cagg_try_repair; - END IF; - EXCEPTION WHEN OTHERS THEN RAISE; -END -$$; --- For tsdb >= v2.10.0 apply the cagg repair when necessary -DO $$ -DECLARE - vname regclass; - materialized_only bool; - ts_version TEXT; -BEGIN - SELECT extversion INTO ts_version FROM pg_extension WHERE extname = 'timescaledb'; - IF ts_version >= '2.10.0' THEN - CREATE PROCEDURE _timescaledb_internal.post_update_cagg_try_repair( - cagg_view REGCLASS, force_rebuild BOOLEAN - ) AS '@MODULE_PATHNAME@', 'ts_cagg_try_repair' LANGUAGE C; - - FOR vname, materialized_only IN select format('%I.%I', cagg.user_view_schema, cagg.user_view_name)::regclass, cagg.materialized_only from _timescaledb_catalog.continuous_agg cagg - LOOP - IF ts_version >= '2.10.0' THEN - SET log_error_verbosity TO VERBOSE; - CALL _timescaledb_internal.post_update_cagg_try_repair(vname, true); - END IF; - END LOOP; - - DROP PROCEDURE IF EXISTS _timescaledb_internal.post_update_cagg_try_repair(REGCLASS, BOOLEAN); - END IF; - EXCEPTION WHEN OTHERS THEN RAISE; + IF ts_major >= 2 AND ts_minor >= 7 THEN + DROP PROCEDURE IF EXISTS _timescaledb_functions.post_update_cagg_try_repair(REGCLASS, BOOLEAN); + END IF; END -$$; +$$ LANGUAGE PLPGSQL; -- can only be dropped after views have been rebuilt DROP FUNCTION IF EXISTS _timescaledb_internal.cagg_watermark(oid); @@ -152,12 +135,12 @@ BEGIN END $$; -- Create dimension partition information for existing space-partitioned hypertables -CREATE FUNCTION _timescaledb_internal.update_dimension_partition(hypertable REGCLASS) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_dimension_partition_update' LANGUAGE C VOLATILE; -SELECT _timescaledb_internal.update_dimension_partition(format('%I.%I', h.schema_name, h.table_name)) +CREATE FUNCTION _timescaledb_functions.update_dimension_partition(hypertable REGCLASS) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_dimension_partition_update' LANGUAGE C VOLATILE; +SELECT _timescaledb_functions.update_dimension_partition(format('%I.%I', h.schema_name, h.table_name)) FROM _timescaledb_catalog.hypertable h INNER JOIN _timescaledb_catalog.dimension d ON (d.hypertable_id = h.id) WHERE d.interval_length IS NULL; -DROP FUNCTION _timescaledb_internal.update_dimension_partition; +DROP FUNCTION _timescaledb_functions.update_dimension_partition; -- Report warning when partial aggregates are used DO $$ diff --git a/test/sql/updates/post.repair.sql b/test/sql/updates/post.repair.sql index acce4bf57b0..b96981fc2b6 100644 --- a/test/sql/updates/post.repair.sql +++ b/test/sql/updates/post.repair.sql @@ -2,13 +2,15 @@ -- Please see the 
included NOTICE for copyright information and -- LICENSE-APACHE for a copy of the license. -SELECT extversion < '2.10.0' AS test_repair_dimension -FROM pg_extension -WHERE extname = 'timescaledb' \gset +SELECT + split_part(extversion, '.', 1)::int * 100000 + + split_part(extversion, '.', 2)::int * 100 AS extversion_num +FROM + pg_extension WHERE extname = 'timescaledb' \gset -SELECT extversion >= '2.10.0' AND :'TEST_VERSION' >= 'v8' AS test_repair_cagg_joins -FROM pg_extension -WHERE extname = 'timescaledb' \gset +SELECT + :extversion_num < 201000 AS test_repair_dimension, + :extversion_num >= 201000 AND :'TEST_VERSION' >= 'v8' AS test_repair_cagg_joins \gset \if :test_repair_dimension -- Re-add the dropped foreign key constraint that was dropped for diff --git a/test/sql/updates/setup.continuous_aggs.sql b/test/sql/updates/setup.continuous_aggs.sql index a443ee9e58e..113f8ea7306 100644 --- a/test/sql/updates/setup.continuous_aggs.sql +++ b/test/sql/updates/setup.continuous_aggs.sql @@ -7,9 +7,13 @@ -- we keep them separate anyway so that we can do additional checking -- if necessary. SELECT - extversion < '2.0.0' AS has_refresh_mat_view - FROM pg_extension - WHERE extname = 'timescaledb' \gset + split_part(extversion, '.', 1)::int * 100000 + + split_part(extversion, '.', 2)::int * 100 AS extversion_num +FROM + pg_extension WHERE extname = 'timescaledb' \gset + +SELECT + :extversion_num < 200000 AS has_refresh_mat_view \gset CREATE TYPE custom_type AS (high int, low int); diff --git a/test/sql/updates/setup.continuous_aggs.v2.sql b/test/sql/updates/setup.continuous_aggs.v2.sql index ed46d7c8638..955f001b6be 100644 --- a/test/sql/updates/setup.continuous_aggs.v2.sql +++ b/test/sql/updates/setup.continuous_aggs.v2.sql @@ -19,9 +19,7 @@ SELECT :ts_major < 2 AS has_ignore_invalidations_older_than, :ts_major < 2 AS has_max_interval_per_job, :ts_major >= 2 AS has_create_mat_view, - :ts_major >= 2 AS has_continuous_aggs_policy, - :ts_major = 2 AND :ts_minor >= 7 AS has_continuous_aggs_finals_form, - :ts_major = 2 AND :ts_minor IN (7,8) AS has_continuous_aggs_finalized_option + :ts_major >= 2 AS has_continuous_aggs_policy FROM pg_extension WHERE extname = 'timescaledb' \gset @@ -63,11 +61,7 @@ SELECT generate_series('2018-11-01 00:00'::timestamp, '2018-12-15 00:00'::timest -- we had a bug related to that and need to verify if compression can be -- enabled on such a view CREATE MATERIALIZED VIEW rename_cols - \if :has_continuous_aggs_finalized_option - WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS - \else WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS - \endif \endif SELECT time_bucket('1 week', timec) AS bucket, location, @@ -85,11 +79,7 @@ SELECT generate_series('2018-11-01 00:00'::timestamp, '2018-12-15 00:00'::timest WITH ( timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.refresh_lag='-30 day', timescaledb.max_interval_per_job ='1000 day') \else CREATE MATERIALIZED VIEW IF NOT EXISTS mat_before - \if :has_continuous_aggs_finalized_option - WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) - \else WITH ( timescaledb.continuous, timescaledb.materialized_only=true) - \endif \endif AS SELECT time_bucket('1week', timec) as bucket, @@ -165,11 +155,7 @@ CREATE SCHEMA cagg; WITH ( timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.refresh_lag='-30 day', timescaledb.max_interval_per_job ='1000 day') \else CREATE MATERIALIZED VIEW IF NOT 
EXISTS cagg.realtime_mat - \if :has_continuous_aggs_finalized_option - WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) - \else WITH ( timescaledb.continuous, timescaledb.materialized_only=false) - \endif \endif AS SELECT time_bucket('1week', timec) as bucket, @@ -236,11 +222,7 @@ CALL refresh_continuous_aggregate('cagg.realtime_mat',NULL,NULL); timescaledb.max_interval_per_job = '100000 days') \else CREATE MATERIALIZED VIEW IF NOT EXISTS mat_ignoreinval - \if :has_continuous_aggs_finalized_option - WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) - \else WITH ( timescaledb.continuous, timescaledb.materialized_only=true) - \endif \endif AS SELECT time_bucket('1 week', timec) as bucket, @@ -277,11 +259,7 @@ SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-20 00:00'::timest timescaledb.max_interval_per_job='100000 days' ) \else CREATE MATERIALIZED VIEW mat_inval - \if :has_continuous_aggs_finalized_option - WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) - \else WITH ( timescaledb.continuous, timescaledb.materialized_only=true ) - \endif \endif AS SELECT time_bucket('10 minute', time) as bucket, location, min(temperature) as min_temp, @@ -325,11 +303,7 @@ INSERT INTO int_time_test VALUES timescaledb.refresh_interval='12 hours') \else CREATE MATERIALIZED VIEW mat_inttime - \if :has_continuous_aggs_finalized_option - WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) - \else WITH ( timescaledb.continuous, timescaledb.materialized_only=true ) - \endif \endif AS SELECT time_bucket( 2, timeval), COUNT(col1) @@ -347,11 +321,7 @@ INSERT INTO int_time_test VALUES timescaledb.refresh_interval='12 hours') \else CREATE MATERIALIZED VIEW mat_inttime2 - \if :has_continuous_aggs_finalized_option - WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) - \else WITH ( timescaledb.continuous, timescaledb.materialized_only=true ) - \endif \endif AS SELECT time_bucket( 2, timeval), COUNT(col1) @@ -385,11 +355,7 @@ SELECT create_hypertable('conflict_test', 'time', chunk_time_interval => INTERVA timescaledb.refresh_interval='12 hours' ) \else CREATE MATERIALIZED VIEW mat_conflict - \if :has_continuous_aggs_finalized_option - WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) - \else WITH ( timescaledb.continuous, timescaledb.materialized_only=true ) - \endif \endif AS SELECT time_bucket('10 minute', time) as bucket, location, min(temperature) as min_temp, @@ -448,9 +414,6 @@ WITH ( \if :has_max_interval_per_job timescaledb.refresh_lag='-30 day', timescaledb.max_interval_per_job ='1000 day', -\endif -\if :has_continuous_aggs_finalized_option - timescaledb.finalized = false, \endif timescaledb.continuous ) AS diff --git a/test/sql/updates/setup.policies.sql b/test/sql/updates/setup.policies.sql index de5752e7cbd..d08902b649d 100644 --- a/test/sql/updates/setup.policies.sql +++ b/test/sql/updates/setup.policies.sql @@ -14,9 +14,7 @@ SELECT WHERE extname = 'timescaledb' \gset SELECT - :ts_major < 2 AS has_drop_chunks_policy - FROM pg_extension - WHERE extname = 'timescaledb' \gset + :ts_major < 2 AS has_drop_chunks_policy \gset DO LANGUAGE PLPGSQL $$ DECLARE diff --git a/test/sql/updates/setup.post-downgrade.sql b/test/sql/updates/setup.post-downgrade.sql index 0dbb1e77bad..ed725e4a17b 100644 --- a/test/sql/updates/setup.post-downgrade.sql 
+++ b/test/sql/updates/setup.post-downgrade.sql @@ -10,9 +10,14 @@ -- directly and prevent a diff between the clean-rerun version and the -- upgrade-downgrade version of the database. -SELECT extversion >= '2.0.0' AS has_create_mat_view - FROM pg_extension - WHERE extname = 'timescaledb' \gset +SELECT + split_part(extversion, '.', 1)::int * 100000 + + split_part(extversion, '.', 2)::int * 100 AS extversion_num +FROM + pg_extension WHERE extname = 'timescaledb' \gset + +SELECT + :extversion_num >= 200000 AS has_create_mat_view \gset -- Rebuild the user views based on the renamed views \if :has_create_mat_view diff --git a/test/sql/updates/setup.repair.cagg.sql b/test/sql/updates/setup.repair.cagg.sql index 4beb074d931..463fef44fbb 100644 --- a/test/sql/updates/setup.repair.cagg.sql +++ b/test/sql/updates/setup.repair.cagg.sql @@ -8,10 +8,16 @@ -- be differences and the update tests would fail. DO $$ DECLARE - ts_version TEXT; + ts_version INTEGER; BEGIN - SELECT extversion INTO ts_version FROM pg_extension WHERE extname = 'timescaledb'; - IF ts_version >= '2.0.0' AND ts_version < '2.7.0' THEN + SELECT + split_part(extversion, '.', 1)::int * 100000 + + split_part(extversion, '.', 2)::int * 100 + INTO ts_version + FROM + pg_extension WHERE extname = 'timescaledb'; + + IF ts_version >= 200000 AND ts_version < 200700 THEN CREATE TABLE conditions_v3 ( timec TIMESTAMPTZ NOT NULL, @@ -40,6 +46,6 @@ BEGIN GROUP BY bucket, temperature WITH NO DATA; - END IF; + END IF; END $$; diff --git a/test/sql/updates/setup.repair.sql b/test/sql/updates/setup.repair.sql index 3d12fbe2e7c..b529f5964de 100644 --- a/test/sql/updates/setup.repair.sql +++ b/test/sql/updates/setup.repair.sql @@ -7,13 +7,15 @@ -- the dimension slice table. The repair script should then repair all -- of them and there should be no dimension slices missing. 
-SELECT extversion < '2.10.0' AS test_repair_dimension -FROM pg_extension -WHERE extname = 'timescaledb' \gset - -SELECT extversion >= '2.10.0' AS has_cagg_joins -FROM pg_extension -WHERE extname = 'timescaledb' \gset +SELECT + split_part(extversion, '.', 1)::int * 100000 + + split_part(extversion, '.', 2)::int * 100 AS extversion_num +FROM + pg_extension WHERE extname = 'timescaledb' \gset + +SELECT + :extversion_num < 201000 AS test_repair_dimension, + :extversion_num >= 201000 AS has_cagg_joins \gset CREATE USER wizard; CREATE USER "Random L User"; diff --git a/tsl/src/continuous_aggs/common.c b/tsl/src/continuous_aggs/common.c index d0a5e01e162..796f71d0343 100644 --- a/tsl/src/continuous_aggs/common.c +++ b/tsl/src/continuous_aggs/common.c @@ -14,7 +14,6 @@ static void caggtimebucketinfo_init(CAggTimebucketInfo *src, int32 hypertable_id int32 parent_mat_hypertable_id); static void caggtimebucket_validate(CAggTimebucketInfo *tbinfo, List *groupClause, List *targetList, bool is_cagg_create); -static bool cagg_agg_validate(Node *node, void *context); static bool cagg_query_supported(const Query *query, StringInfo hint, StringInfo detail, const bool finalized); static Oid cagg_get_boundary_converter_funcoid(Oid typoid); @@ -378,50 +377,6 @@ caggtimebucket_validate(CAggTimebucketInfo *tbinfo, List *groupClause, List *tar elog(ERROR, "continuous aggregate view must include a valid time bucket function"); } -static bool -cagg_agg_validate(Node *node, void *context) -{ - if (node == NULL) - return false; - - if (IsA(node, Aggref)) - { - Aggref *agg = (Aggref *) node; - HeapTuple aggtuple; - Form_pg_aggregate aggform; - if (agg->aggorder || agg->aggdistinct || agg->aggfilter) - { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("aggregates with FILTER / DISTINCT / ORDER BY are not supported"))); - } - /* Fetch the pg_aggregate row. */ - aggtuple = SearchSysCache1(AGGFNOID, agg->aggfnoid); - if (!HeapTupleIsValid(aggtuple)) - elog(ERROR, "cache lookup failed for aggregate %u", agg->aggfnoid); - aggform = (Form_pg_aggregate) GETSTRUCT(aggtuple); - if (aggform->aggkind != 'n') - { - ReleaseSysCache(aggtuple); - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ordered set/hypothetical aggregates are not supported"))); - } - if (!OidIsValid(aggform->aggcombinefn) || - (aggform->aggtranstype == INTERNALOID && !OidIsValid(aggform->aggdeserialfn))) - { - ReleaseSysCache(aggtuple); - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("aggregates which are not parallelizable are not supported"))); - } - ReleaseSysCache(aggtuple); - - return false; - } - return expression_tree_walker(node, cagg_agg_validate, context); -} - /* * Check query and extract error details and error hints. * @@ -432,13 +387,6 @@ cagg_agg_validate(Node *node, void *context) static bool cagg_query_supported(const Query *query, StringInfo hint, StringInfo detail, const bool finalized) { -/* - * For now deprecate partial aggregates on release builds only. - * Once migration tests are made compatible with PG15 enable deprecation - * on debug builds as well.
- */ -#ifndef DEBUG -#if PG15_GE if (!finalized) { /* continuous aggregates with old format will not be allowed */ @@ -449,8 +397,6 @@ cagg_query_supported(const Query *query, StringInfo hint, StringInfo detail, con "to true."); return false; } -#endif -#endif if (!query->jointree->fromlist) { appendStringInfoString(hint, "FROM clause missing in the query"); return false; } @@ -488,17 +434,6 @@ cagg_query_supported(const Query *query, StringInfo hint, StringInfo detail, con return false; } - if (query->sortClause && !finalized) - { - appendStringInfoString(detail, - "ORDER BY is not supported in queries defining continuous " - "aggregates."); - appendStringInfoString(hint, - "Use ORDER BY clauses in SELECTS from the continuous aggregate view " - "instead."); - return false; - } - if (query->hasRecursive || query->hasSubLinks || query->hasTargetSRFs || query->cteList) { appendStringInfoString(detail, @@ -649,13 +584,6 @@ cagg_validate_query(const Query *query, const bool finalized, const char *cagg_s detail->len > 0 ? errdetail("%s", detail->data) : 0)); } - /* Finalized cagg doesn't have those restrictions anymore. */ - if (!finalized) - { - /* Validate aggregates allowed. */ - cagg_agg_validate((Node *) query->targetList, NULL); - cagg_agg_validate((Node *) query->havingQual, NULL); - } /* Check if there are only two tables in the from list. */ fromList = query->jointree->fromlist; if (list_length(fromList) > CONTINUOUS_AGG_MAX_JOIN_RELATIONS) @@ -669,13 +597,6 @@ cagg_validate_query(const Query *query, const bool finalized, const char *cagg_s if (list_length(fromList) == CONTINUOUS_AGG_MAX_JOIN_RELATIONS || !IsA(linitial(query->jointree->fromlist), RangeTblRef)) { - /* Using old format caggs is not supported */ - if (!finalized) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("old format of continuous aggregate is not supported with joins"), - errhint("Set timescaledb.finalized to TRUE."))); - if (list_length(fromList) == CONTINUOUS_AGG_MAX_JOIN_RELATIONS) { if (!IsA(linitial(fromList), RangeTblRef) || !IsA(lsecond(fromList), RangeTblRef)) diff --git a/tsl/src/continuous_aggs/create.c b/tsl/src/continuous_aggs/create.c index 945be6f54c8..93ea8a97236 100644 --- a/tsl/src/continuous_aggs/create.c +++ b/tsl/src/continuous_aggs/create.c @@ -751,13 +751,11 @@ cagg_create(const CreateTableAsStmt *create_stmt, ViewStmt *stmt, Query *panquer stmt->options = NULL; /* - * Step 0: Add any internal columns needed for materialization based - * on the user query's table. + * Old-format caggs are no longer supported, so there is no need to add + * an internal chunk id column to the materialized hypertable. + * + * Step 1: create the materialization table. */ - if (!finalized) - mattablecolumninfo_addinternal(&mattblinfo); - - /* Step 1: create the materialization table.
*/ ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx); materialize_hypertable_id = ts_catalog_table_next_seq_id(ts_catalog_get(), HYPERTABLE); ts_catalog_restore_user(&sec_ctx); diff --git a/tsl/src/continuous_aggs/finalize.c b/tsl/src/continuous_aggs/finalize.c index 9fe1854e132..8674cbb110e 100644 --- a/tsl/src/continuous_aggs/finalize.c +++ b/tsl/src/continuous_aggs/finalize.c @@ -16,25 +16,10 @@ #include "common.h" #include -typedef struct CAggHavingCxt -{ - List *origq_tlist; - List *finalizeq_tlist; - AggPartCxt agg_cxt; -} CAggHavingCxt; - /* Static function prototypes */ -static Datum get_input_types_array_datum(Aggref *original_aggregate); -static Aggref *add_partialize_column(Aggref *agg_to_partialize, AggPartCxt *cxt); -static void set_var_mapping(Var *orig_var, Var *mapped_var, AggPartCxt *cxt); -static Var *var_already_mapped(Var *var, AggPartCxt *cxt); -static Node *create_replace_having_qual_mutator(Node *node, CAggHavingCxt *cxt); -static Node *finalizequery_create_havingqual(FinalizeQueryInfo *inp, - MatTableColumnInfo *mattblinfo); static Var *mattablecolumninfo_addentry(MatTableColumnInfo *out, Node *input, int original_query_resno, bool finalized, bool *skip_adding); -static FuncExpr *get_partialize_funcexpr(Aggref *agg); static inline void makeMaterializeColumnName(char *colbuf, const char *type, int original_query_resno, int colno); @@ -47,432 +32,6 @@ makeMaterializeColumnName(char *colbuf, const char *type, int original_query_res (errcode(ERRCODE_INTERNAL_ERROR), errmsg("bad materialization table column name"))); } -/* - * Creates a partialize expr for the passed in agg: - * partialize_agg(agg). - */ -static FuncExpr * -get_partialize_funcexpr(Aggref *agg) -{ - FuncExpr *partialize_fnexpr; - Oid partfnoid, partargtype; - partargtype = ANYELEMENTOID; - partfnoid = LookupFuncName(list_make2(makeString(FUNCTIONS_SCHEMA_NAME), - makeString(PARTIALIZE_FUNC_NAME)), - 1, - &partargtype, - false); - partialize_fnexpr = makeFuncExpr(partfnoid, - BYTEAOID, - list_make1(agg), /*args*/ - InvalidOid, - InvalidOid, - COERCE_EXPLICIT_CALL); - return partialize_fnexpr; -} - -/* - * Build a [N][2] array where N is number of arguments - * and the inner array is of [schema_name,type_name]. - */ -static Datum -get_input_types_array_datum(Aggref *original_aggregate) -{ - ListCell *lc; - MemoryContext builder_context = - AllocSetContextCreate(CurrentMemoryContext, "input types builder", ALLOCSET_DEFAULT_SIZES); - Oid name_array_type_oid = get_array_type(NAMEOID); - ArrayBuildStateArr *outer_builder = - initArrayResultArr(name_array_type_oid, NAMEOID, builder_context, false); - Datum result; - - foreach (lc, original_aggregate->args) - { - TargetEntry *te = lfirst(lc); - Oid type_oid = exprType((Node *) te->expr); - ArrayBuildState *schema_name_builder = initArrayResult(NAMEOID, builder_context, false); - HeapTuple tp; - Form_pg_type typtup; - char *schema_name; - Name type_name = (Name) palloc0(NAMEDATALEN); - Datum schema_datum; - Datum type_name_datum; - Datum inner_array_datum; - - tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_oid)); - if (!HeapTupleIsValid(tp)) - elog(ERROR, "cache lookup failed for type %u", type_oid); - - typtup = (Form_pg_type) GETSTRUCT(tp); - namestrcpy(type_name, NameStr(typtup->typname)); - schema_name = get_namespace_name(typtup->typnamespace); - ReleaseSysCache(tp); - - type_name_datum = NameGetDatum(type_name); - /* Using name in because creating from a char * (that may be null or too long). 
*/ - schema_datum = DirectFunctionCall1(namein, CStringGetDatum(schema_name)); - - accumArrayResult(schema_name_builder, schema_datum, false, NAMEOID, builder_context); - accumArrayResult(schema_name_builder, type_name_datum, false, NAMEOID, builder_context); - - inner_array_datum = makeArrayResult(schema_name_builder, CurrentMemoryContext); - - accumArrayResultArr(outer_builder, - inner_array_datum, - false, - name_array_type_oid, - builder_context); - } - result = makeArrayResultArr(outer_builder, CurrentMemoryContext, false); - - MemoryContextDelete(builder_context); - return result; -} - -static Aggref * -add_partialize_column(Aggref *agg_to_partialize, AggPartCxt *cxt) -{ - Aggref *newagg; - Var *var; - bool skip_adding; - - /* - * Step 1: create partialize( aggref) column - * for materialization table. - */ - var = mattablecolumninfo_addentry(cxt->mattblinfo, - (Node *) agg_to_partialize, - cxt->original_query_resno, - false, - &skip_adding); - cxt->added_aggref_col = true; - /* - * Step 2: create finalize_agg expr using var - * for the column added to the materialization table. - */ - - /* This is a var for the column we created. */ - newagg = get_finalize_aggref(agg_to_partialize, var); - return newagg; -} - -static void -set_var_mapping(Var *orig_var, Var *mapped_var, AggPartCxt *cxt) -{ - cxt->orig_vars = lappend(cxt->orig_vars, orig_var); - cxt->mapped_vars = lappend(cxt->mapped_vars, mapped_var); -} - -/* - * Checks whether var has already been mapped and returns the - * corresponding column of the materialization table. - */ -static Var * -var_already_mapped(Var *var, AggPartCxt *cxt) -{ - ListCell *lc_old, *lc_new; - - forboth (lc_old, cxt->orig_vars, lc_new, cxt->mapped_vars) - { - Var *orig_var = (Var *) lfirst_node(Var, lc_old); - Var *mapped_var = (Var *) lfirst_node(Var, lc_new); - - /* There should be no subqueries so varlevelsup should not be a problem here. */ - if (var->varno == orig_var->varno && var->varattno == orig_var->varattno) - return mapped_var; - } - return NULL; -} - -/* - * Add ts_internal_cagg_final to bytea column. - * bytea column is the internal state for an agg. Pass info for the agg as "inp". - * inpcol = bytea column. - * This function returns an aggref - * ts_internal_cagg_final( Oid, Oid, bytea, NULL::output_typeid) - * the arguments are a list of targetentry - */ -Oid -get_finalize_function_oid(void) -{ - Oid finalfnoid; - Oid finalfnargtypes[] = { TEXTOID, NAMEOID, NAMEOID, get_array_type(NAMEOID), - BYTEAOID, ANYELEMENTOID }; - List *funcname = list_make2(makeString(FUNCTIONS_SCHEMA_NAME), makeString(FINALFN)); - int nargs = sizeof(finalfnargtypes) / sizeof(finalfnargtypes[0]); - finalfnoid = LookupFuncName(funcname, nargs, finalfnargtypes, false); - return finalfnoid; -} - -/* - * Creates an aggref of the form: - * finalize-agg( - * "sum(int)" TEXT, - * collation_schema_name NAME, collation_name NAME, - * input_types_array NAME[N][2], - * BYTEA, - * null:: - * ) - * here sum(int) is the input aggregate "inp" in the parameter-list. 
- */ -Aggref * -get_finalize_aggref(Aggref *inp, Var *partial_state_var) -{ - Aggref *aggref; - TargetEntry *te; - char *aggregate_signature; - Const *aggregate_signature_const, *collation_schema_const, *collation_name_const, - *input_types_const, *return_type_const; - Oid name_array_type_oid = get_array_type(NAMEOID); - Var *partial_bytea_var; - List *tlist = NIL; - int tlist_attno = 1; - List *argtypes = NIL; - char *collation_name = NULL, *collation_schema_name = NULL; - Datum collation_name_datum = (Datum) 0; - Datum collation_schema_datum = (Datum) 0; - Oid finalfnoid = get_finalize_function_oid(); - - argtypes = list_make5_oid(TEXTOID, NAMEOID, NAMEOID, name_array_type_oid, BYTEAOID); - argtypes = lappend_oid(argtypes, inp->aggtype); - - aggref = makeNode(Aggref); - aggref->aggfnoid = finalfnoid; - aggref->aggtype = inp->aggtype; - aggref->aggcollid = inp->aggcollid; - aggref->inputcollid = inp->inputcollid; - aggref->aggtranstype = InvalidOid; /* will be set by planner */ - aggref->aggargtypes = argtypes; - aggref->aggdirectargs = NULL; /*relevant for hypothetical set aggs*/ - aggref->aggorder = NULL; - aggref->aggdistinct = NULL; - aggref->aggfilter = NULL; - aggref->aggstar = false; - aggref->aggvariadic = false; - aggref->aggkind = AGGKIND_NORMAL; - aggref->aggsplit = AGGSPLIT_SIMPLE; - aggref->location = -1; - /* Construct the arguments. */ - aggregate_signature = format_procedure_qualified(inp->aggfnoid); - aggregate_signature_const = makeConst(TEXTOID, - -1, - DEFAULT_COLLATION_OID, - -1, - CStringGetTextDatum(aggregate_signature), - false, - false /* passbyval */ - ); - te = makeTargetEntry((Expr *) aggregate_signature_const, tlist_attno++, NULL, false); - tlist = lappend(tlist, te); - - if (OidIsValid(inp->inputcollid)) - { - /* Similar to generate_collation_name. */ - HeapTuple tp; - Form_pg_collation colltup; - tp = SearchSysCache1(COLLOID, ObjectIdGetDatum(inp->inputcollid)); - if (!HeapTupleIsValid(tp)) - elog(ERROR, "cache lookup failed for collation %u", inp->inputcollid); - colltup = (Form_pg_collation) GETSTRUCT(tp); - collation_name = pstrdup(NameStr(colltup->collname)); - collation_name_datum = DirectFunctionCall1(namein, CStringGetDatum(collation_name)); - - collation_schema_name = get_namespace_name(colltup->collnamespace); - if (collation_schema_name != NULL) - collation_schema_datum = - DirectFunctionCall1(namein, CStringGetDatum(collation_schema_name)); - ReleaseSysCache(tp); - } - collation_schema_const = makeConst(NAMEOID, - -1, - InvalidOid, - NAMEDATALEN, - collation_schema_datum, - (collation_schema_name == NULL) ? true : false, - false /* passbyval */ - ); - te = makeTargetEntry((Expr *) collation_schema_const, tlist_attno++, NULL, false); - tlist = lappend(tlist, te); - - collation_name_const = makeConst(NAMEOID, - -1, - InvalidOid, - NAMEDATALEN, - collation_name_datum, - (collation_name == NULL) ? 
true : false, - false /* passbyval */ - ); - te = makeTargetEntry((Expr *) collation_name_const, tlist_attno++, NULL, false); - tlist = lappend(tlist, te); - - input_types_const = makeConst(get_array_type(NAMEOID), - -1, - InvalidOid, - -1, - get_input_types_array_datum(inp), - false, - false /* passbyval */ - ); - te = makeTargetEntry((Expr *) input_types_const, tlist_attno++, NULL, false); - tlist = lappend(tlist, te); - - partial_bytea_var = copyObject(partial_state_var); - te = makeTargetEntry((Expr *) partial_bytea_var, tlist_attno++, NULL, false); - tlist = lappend(tlist, te); - - return_type_const = makeNullConst(inp->aggtype, -1, inp->aggcollid); - te = makeTargetEntry((Expr *) return_type_const, tlist_attno++, NULL, false); - tlist = lappend(tlist, te); - - Assert(tlist_attno == 7); - aggref->args = tlist; - return aggref; -} - -Node * -add_var_mutator(Node *node, AggPartCxt *cxt) -{ - if (node == NULL) - return NULL; - if (IsA(node, Aggref)) - { - return node; /* don't process this further */ - } - if (IsA(node, Var)) - { - Var *orig_var, *mapped_var; - bool skip_adding = false; - - mapped_var = var_already_mapped((Var *) node, cxt); - /* Avoid duplicating columns in the materialization table. */ - if (mapped_var) - /* - * There should be no subquery so mapped_var->varlevelsup - * should not be a problem here. - */ - return (Node *) copyObject(mapped_var); - - orig_var = (Var *) node; - mapped_var = mattablecolumninfo_addentry(cxt->mattblinfo, - node, - cxt->original_query_resno, - false, - &skip_adding); - set_var_mapping(orig_var, mapped_var, cxt); - return (Node *) mapped_var; - } - return expression_tree_mutator(node, add_var_mutator, cxt); -} - -/* - * This function modifies the passed in havingQual by mapping exprs to - * columns in materialization table or finalized aggregate form. - * Note that HAVING clause can contain only exprs from group-by or aggregates - * and GROUP BY clauses cannot be aggregates. - * (By the time we process havingQuals, all the group by exprs have been - * processed and have associated columns in the materialization hypertable). - * Example, if the original query has - * GROUP BY colA + colB, colC - * HAVING colA + colB + sum(colD) > 10 OR count(colE) = 10 - * - * The transformed havingqual would be - * HAVING matCol3 + finalize_agg( sum(matCol4) > 10 - * OR finalize_agg( count(matCol5)) = 10 - * - * - * Note: GROUP BY exprs always appear in the query's targetlist. - * Some of the aggregates from the havingQual might also already appear in the targetlist. - * We replace all existing entries with their corresponding entry from the modified targetlist. - * If an aggregate (in the havingqual) does not exist in the TL, we create a - * materialization table column for it and use the finalize(column) form in the - * transformed havingQual. - */ -static Node * -create_replace_having_qual_mutator(Node *node, CAggHavingCxt *cxt) -{ - if (node == NULL) - return NULL; - /* - * See if we already have a column in materialization hypertable for this - * expr. We do this by checking the existing targetlist - * entries for the query. - */ - ListCell *lc, *lc2; - List *origtlist = cxt->origq_tlist; - List *modtlist = cxt->finalizeq_tlist; - forboth (lc, origtlist, lc2, modtlist) - { - TargetEntry *te = (TargetEntry *) lfirst(lc); - TargetEntry *modte = (TargetEntry *) lfirst(lc2); - if (equal(node, te->expr)) - { - return (Node *) modte->expr; - } - } - /* - * Didn't find a match in targetlist. 
If it is an aggregate, - * create a partialize column for it in materialization hypertable - * and return corresponding finalize expr. - */ - if (IsA(node, Aggref)) - { - AggPartCxt *agg_cxt = &(cxt->agg_cxt); - agg_cxt->added_aggref_col = false; - Aggref *newagg = add_partialize_column((Aggref *) node, agg_cxt); - Assert(agg_cxt->added_aggref_col == true); - return (Node *) newagg; - } - return expression_tree_mutator(node, create_replace_having_qual_mutator, cxt); -} - -static Node * -finalizequery_create_havingqual(FinalizeQueryInfo *inp, MatTableColumnInfo *mattblinfo) -{ - Query *orig_query = inp->final_userquery; - if (orig_query->havingQual == NULL) - return NULL; - Node *havingQual = copyObject(orig_query->havingQual); - Assert(inp->final_seltlist != NULL); - CAggHavingCxt hcxt = { .origq_tlist = orig_query->targetList, - .finalizeq_tlist = inp->final_seltlist, - .agg_cxt.mattblinfo = mattblinfo, - .agg_cxt.original_query_resno = 0, - .agg_cxt.ignore_aggoid = get_finalize_function_oid(), - .agg_cxt.added_aggref_col = false, - .agg_cxt.var_outside_of_aggref = false, - .agg_cxt.orig_vars = NIL, - .agg_cxt.mapped_vars = NIL }; - return create_replace_having_qual_mutator(havingQual, &hcxt); -} - -Node * -add_aggregate_partialize_mutator(Node *node, AggPartCxt *cxt) -{ - if (node == NULL) - return NULL; - /* - * Modify the aggref and create a partialize(aggref) expr - * for the materialization. - * Add a corresponding columndef for the mat table. - * Replace the aggref with the ts_internal_cagg_final fn. - * using a Var for the corresponding column in the mat table. - * All new Vars have varno = 1 (for RTE 1). - */ - if (IsA(node, Aggref)) - { - if (cxt->ignore_aggoid == ((Aggref *) node)->aggfnoid) - return node; /* don't process this further */ - - Aggref *newagg = add_partialize_column((Aggref *) node, cxt); - return (Node *) newagg; - } - if (IsA(node, Var)) - { - cxt->var_outside_of_aggref = true; - } - return expression_tree_mutator(node, add_aggregate_partialize_mutator, cxt); -} - /* * Init the finalize query data structure. * Parameters: @@ -520,19 +79,6 @@ finalizequery_init(FinalizeQueryInfo *inp, Query *orig_query, MatTableColumnInfo cxt.var_outside_of_aggref = false; cxt.original_query_resno = resno; - if (!inp->finalized) - { - /* - * If tle has aggrefs, get the corresponding - * finalize_agg expression and save it in modte. - * Also add correspong materialization table column info - * for the aggrefs in tle. - */ - modte = (TargetEntry *) expression_tree_mutator((Node *) modte, - add_aggregate_partialize_mutator, - &cxt); - } - /* * We need columns for non-aggregate targets. * If it is not a resjunk OR appears in the grouping clause. @@ -556,11 +102,6 @@ finalizequery_init(FinalizeQueryInfo *inp, Query *orig_query, MatTableColumnInfo /* Fix the expression for the target entry. */ modte->expr = (Expr *) var; } - /* Check for left over variables (Var) of targets that contain Aggref. */ - if (cxt.added_aggref_col && cxt.var_outside_of_aggref && !inp->finalized) - { - modte = (TargetEntry *) expression_tree_mutator((Node *) modte, add_var_mutator, &cxt); - } /* * Construct the targetlist for the query on the * materialization table. The TL maps 1-1 with the original query: @@ -577,8 +118,7 @@ finalizequery_init(FinalizeQueryInfo *inp, Query *orig_query, MatTableColumnInfo * final_selquery and origquery. So tleSortGroupReffor the targetentry * can be reused, only table info needs to be modified. 
*/ - Assert((!inp->finalized && modte->resno == resno) || - (inp->finalized && modte->resno >= resno)); + Assert(inp->finalized && modte->resno >= resno); resno++; if (IsA(modte->expr, Var)) { @@ -586,12 +126,6 @@ finalizequery_init(FinalizeQueryInfo *inp, Query *orig_query, MatTableColumnInfo } inp->final_seltlist = lappend(inp->final_seltlist, modte); } - /* - * All grouping clause elements are in targetlist already. - * So let's check the having clause. - */ - if (!inp->finalized) - inp->final_havingqual = finalizequery_create_havingqual(inp, mattblinfo); } /* @@ -777,12 +311,7 @@ finalizequery_get_select_query(FinalizeQueryInfo *inp, List *matcollist, final_selquery->targetList = inp->final_seltlist; final_selquery->sortClause = inp->final_userquery->sortClause; - if (!inp->finalized) - { - final_selquery->groupClause = inp->final_userquery->groupClause; - /* Copy the having clause too */ - final_selquery->havingQual = inp->final_havingqual; - } + /* The query is already finalized; no need to copy the GROUP BY or HAVING clause. */ return final_selquery; } @@ -814,7 +343,7 @@ mattablecolumninfo_addentry(MatTableColumnInfo *out, Node *input, int original_q TargetEntry *part_te = NULL; ColumnDef *col; Var *var; - Oid coltype, colcollation; + Oid coltype = InvalidOid, colcollation = InvalidOid; int32 coltypmod; *skip_adding = false; @@ -832,19 +361,6 @@ mattablecolumninfo_addentry(MatTableColumnInfo *out, Node *input, int original_q switch (nodeTag(input)) { - case T_Aggref: - { - FuncExpr *fexpr = get_partialize_funcexpr((Aggref *) input); - makeMaterializeColumnName(colbuf, "agg", original_query_resno, matcolno); - colname = colbuf; - coltype = BYTEAOID; - coltypmod = -1; - colcollation = InvalidOid; - col = makeColumnDef(colname, coltype, coltypmod, colcollation); - part_te = makeTargetEntry((Expr *) fexpr, matcolno, pstrdup(colname), false); - } - break; - case T_TargetEntry: { TargetEntry *tle = (TargetEntry *) input; @@ -894,8 +410,8 @@ mattablecolumninfo_addentry(MatTableColumnInfo *out, Node *input, int original_q col = makeColumnDef(colname, coltype, coltypmod, colcollation); part_te = (TargetEntry *) copyObject(input); - /* Keep original resjunk if finalized or not time bucket. */ - if (!finalized || timebkt_chk) + /* Keep original resjunk if not time bucket.
*/ + if (timebkt_chk) { /* * Need to project all the partial entries so that @@ -939,8 +455,7 @@ mattablecolumninfo_addentry(MatTableColumnInfo *out, Node *input, int original_q elog(ERROR, "invalid node type %d", nodeTag(input)); break; } - Assert((!finalized && list_length(out->matcollist) == list_length(out->partial_seltlist)) || - (finalized && list_length(out->matcollist) <= list_length(out->partial_seltlist))); + Assert(finalized && list_length(out->matcollist) <= list_length(out->partial_seltlist)); Assert(col != NULL); Assert(part_te != NULL); diff --git a/tsl/src/continuous_aggs/finalize.h b/tsl/src/continuous_aggs/finalize.h index 960e1e37ff1..99c04b594a8 100644 --- a/tsl/src/continuous_aggs/finalize.h +++ b/tsl/src/continuous_aggs/finalize.h @@ -23,12 +23,6 @@ #define FINALFN "finalize_agg" -extern Oid get_finalize_function_oid(void); -extern Aggref *get_finalize_aggref(Aggref *inp, Var *partial_state_var); -extern Node *add_aggregate_partialize_mutator(Node *node, AggPartCxt *cxt); -extern Node *add_var_mutator(Node *node, AggPartCxt *cxt); -extern Node *finalize_query_create_having_qual(FinalizeQueryInfo *inp, - MatTableColumnInfo *mattblinfo); extern Query *finalize_query_get_select_query(FinalizeQueryInfo *inp, List *matcollist, ObjectAddress *mattbladdress); extern void finalizequery_init(FinalizeQueryInfo *inp, Query *orig_query, diff --git a/tsl/src/continuous_aggs/repair.c b/tsl/src/continuous_aggs/repair.c index 984400673bc..f52c2221a03 100644 --- a/tsl/src/continuous_aggs/repair.c +++ b/tsl/src/continuous_aggs/repair.c @@ -17,18 +17,30 @@ static void cagg_rebuild_view_definition(ContinuousAgg *agg, Hypertable *mat_ht, bool force_rebuild) { bool test_failed = false; - char *relname = agg->data.user_view_name.data; - char *schema = agg->data.user_view_schema.data; + char *relname = NameStr(agg->data.user_view_name); + char *schema = NameStr(agg->data.user_view_schema); ListCell *lc1, *lc2; int sec_ctx; Oid uid, saved_uid; + bool finalized = ContinuousAggIsFinalized(agg); + if (!finalized) + { + ereport(WARNING, + (errmsg("repairing Continuous Aggregates with partials is not supported anymore."), + errdetail("Migrate the Continuous Aggregate to the finalized form to rebuild it."), + errhint("Run \"CALL cagg_migrate('%s.%s');\" to migrate to the new " + "format.", + schema, + relname))); + return; + } + /* Cagg view created by the user. */ Oid user_view_oid = relation_oid(&agg->data.user_view_schema, &agg->data.user_view_name); Relation user_view_rel = relation_open(user_view_oid, AccessShareLock); Query *user_query = get_view_query(user_view_rel); - bool finalized = ContinuousAggIsFinalized(agg); bool rebuild_cagg_with_joins = false; /* Extract final query from user view query. */ diff --git a/tsl/test/expected/cagg_errors_deprecated-13.out b/tsl/test/expected/cagg_errors_deprecated-13.out deleted file mode 100644 index d504e9687b4..00000000000 --- a/tsl/test/expected/cagg_errors_deprecated-13.out +++ /dev/null @@ -1,714 +0,0 @@ --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license.
-\set ON_ERROR_STOP 0 -\set VERBOSITY default ---negative tests for query validation -create table mat_t1( a integer, b integer,c TEXT); -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature integer NULL, - humidity DOUBLE PRECISION NULL, - timemeasure TIMESTAMPTZ, - timeinterval INTERVAL - ); -select table_name from create_hypertable( 'conditions', 'timec'); - table_name ------------- - conditions -(1 row) - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, timescaledb.myfill = 1) -as -select location , min(temperature) -from conditions -group by time_bucket('1d', timec), location WITH NO DATA; -ERROR: unrecognized parameter "timescaledb.myfill" ---valid PG option -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, check_option = LOCAL ) -as -select * from conditions , mat_t1 WITH NO DATA; -ERROR: unsupported combination of storage parameters -DETAIL: A continuous aggregate does not support standard storage parameters. -HINT: Use only parameters with the "timescaledb." prefix when creating a continuous aggregate. ---non-hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select a, count(*) from mat_t1 -group by a WITH NO DATA; -ERROR: table "mat_t1" is not a hypertable --- no group by -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select count(*) from conditions WITH NO DATA; -ERROR: invalid continuous aggregate query -HINT: Include at least one aggregate function and a GROUP BY clause with time bucket. --- no time_bucket in group by -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select count(*) from conditions group by location WITH NO DATA; -ERROR: continuous aggregate view must include a valid time bucket function --- with valid query in a CTE -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -with m1 as ( -Select location, count(*) from conditions - group by time_bucket('1week', timec) , location) -select * from m1 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: CTEs, subqueries and set-returning functions are not supported by continuous aggregates. ---with DISTINCT ON -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as - select distinct on ( location ) count(*) from conditions group by location, time_bucket('1week', timec) WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: DISTINCT / DISTINCT ON queries are not supported by continuous aggregates. 
---aggregate with DISTINCT -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), - count(location) , sum(distinct temperature) from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported ---aggregate with FILTER -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), - sum(temperature) filter ( where humidity > 20 ) from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported --- aggregate with filter in having clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), max(temperature) -from conditions - group by time_bucket('1week', timec) , location - having sum(temperature) filter ( where humidity > 20 ) > 50 WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported --- time_bucket on non partitioning column of hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timemeasure) , location WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column ---time_bucket on expression -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timec+ '10 minutes'::interval) , location WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column ---multiple time_bucket functions -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timec) , time_bucket('1month', timec), location WITH NO DATA; -ERROR: continuous aggregate view cannot contain multiple time bucket functions ---time_bucket using additional args -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket( INTERVAL '5 minutes', timec, INTERVAL '-2.5 minutes') , location WITH NO DATA; -ERROR: continuous aggregate view must include a valid time bucket function ---time_bucket using non-const for first argument -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket( timeinterval, timec) , location WITH NO DATA; -ERROR: only immutable expressions allowed in time bucket function -HINT: Use an immutable expression as first argument to the time bucket function. 
--- ordered set aggr -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select mode() within group( order by humidity) -from conditions - group by time_bucket('1week', timec) WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported ---window function -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select avg(temperature) over( order by humidity) -from conditions - WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: Window functions are not supported by continuous aggregates. ---aggregate without combine function -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select json_agg(location) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates which are not parallelizable are not supported -; --- Starting on PG16 this test will pass because array_agg is parallel safe --- https://github.com/postgres/postgres/commit/16fd03e956540d1b47b743f6a84f37c54ac93dd4 -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature), array_agg(location) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates which are not parallelizable are not supported -; --- userdefined aggregate without combine function -CREATE AGGREGATE newavg ( - sfunc = int4_avg_accum, basetype = int4, stype = _int8, - finalfunc = int8_avg, - initcond1 = '{0,0}' -); -DROP MATERIALIZED VIEW IF EXISTS mat_m1; -NOTICE: materialized view "mat_m1" does not exist, skipping -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), newavg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates which are not parallelizable are not supported -; --- using subqueries -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from -( select humidity, temperature, location, timec -from conditions ) q - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: invalid continuous aggregate view -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -select * from -( Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location ) q WITH NO DATA; -ERROR: invalid continuous aggregate query -HINT: Include at least one aggregate function and a GROUP BY clause with time bucket. ---using limit /limit offset -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -limit 10 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. -HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. 
-CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -offset 10 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. -HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. ---using ORDER BY in view defintion -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -ORDER BY 1 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: ORDER BY is not supported in queries defining continuous aggregates. -HINT: Use ORDER BY clauses in SELECTS from the continuous aggregate view instead. ---using FETCH -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -fetch first 10 rows only WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. -HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. ---using locking clauses FOR clause ---all should be disabled. we cannot guarntee locks on the hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR KEY SHARE WITH NO DATA; -ERROR: FOR KEY SHARE is not allowed with GROUP BY clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR SHARE WITH NO DATA; -ERROR: FOR SHARE is not allowed with GROUP BY clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR UPDATE WITH NO DATA; -ERROR: FOR UPDATE is not allowed with GROUP BY clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR NO KEY UPDATE WITH NO DATA; -ERROR: FOR NO KEY UPDATE is not allowed with GROUP BY clause ---tablesample clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions tablesample bernoulli(0.2) - group by time_bucket('1week', timec) , location - WITH NO DATA; -ERROR: invalid continuous aggregate view --- ONLY in from clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from ONLY 
conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: invalid continuous aggregate view ---grouping sets and variants -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by grouping sets(time_bucket('1week', timec) , location ) WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: GROUP BY GROUPING SETS, ROLLUP and CUBE are not supported by continuous aggregates -HINT: Define multiple continuous aggregates with different grouping levels. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions -group by rollup(time_bucket('1week', timec) , location ) WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: GROUP BY GROUPING SETS, ROLLUP and CUBE are not supported by continuous aggregates -HINT: Define multiple continuous aggregates with different grouping levels. ---NO immutable functions -- check all clauses -CREATE FUNCTION test_stablefunc(int) RETURNS int LANGUAGE 'sql' - STABLE AS 'SELECT $1 + 10'; -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), max(timec + INTERVAL '1h') -from conditions -group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), min(location) -from conditions -group by time_bucket('1week', timec) -having max(timec + INTERVAL '1h') > '2010-01-01 09:00:00-08' WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( test_stablefunc(humidity::int) ), min(location) -from conditions -group by time_bucket('1week', timec) WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( temperature ), min(location) -from conditions -group by time_bucket('1week', timec), test_stablefunc(humidity::int) WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. 
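-- Illustrative sketch, not part of the deleted expected output: for contrast
-- with the failures above, a definition along these lines passes validation.
-- It buckets on the hypertable's time dimension, uses only IMMUTABLE
-- expressions, keeps ORDER BY / LIMIT out of the view body, and omits
-- timescaledb.finalized = false, so it is created in the finalized format
-- that this PR keeps. The view name conditions_weekly is hypothetical; the
-- columns come from the conditions table used throughout these tests.
CREATE MATERIALIZED VIEW conditions_weekly
WITH (timescaledb.continuous, timescaledb.materialized_only = false)
AS
SELECT time_bucket('1week', timec) AS bucket, location,
       sum(humidity) AS sumh, max(temperature) AS maxt
FROM conditions
GROUP BY time_bucket('1week', timec), location WITH NO DATA;
-- Per the HINTs above, ordering and limiting belong in queries over the
-- finished view rather than in its definition:
SELECT * FROM conditions_weekly ORDER BY bucket DESC LIMIT 10;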
--- Should use CREATE MATERIALIZED VIEW to create continuous aggregates -CREATE VIEW continuous_aggs_errors_tbl1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS -SELECT time_bucket('1 week', timec) - FROM conditions -GROUP BY time_bucket('1 week', timec); -ERROR: cannot create continuous aggregate with CREATE VIEW -HINT: Use CREATE MATERIALIZED VIEW to create a continuous aggregate. --- row security on table -create table rowsec_tab( a bigint, b integer, c integer); -select table_name from create_hypertable( 'rowsec_tab', 'a', chunk_time_interval=>10); -NOTICE: adding not-null constraint to column "a" -DETAIL: Dimensions cannot have NULL values. - table_name ------------- - rowsec_tab -(1 row) - -CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a), 0)::bigint FROM rowsec_tab $$; -SELECT set_integer_now_func('rowsec_tab', 'integer_now_test'); - set_integer_now_func ----------------------- - -(1 row) - -alter table rowsec_tab ENABLE ROW LEVEL SECURITY; -create policy rowsec_tab_allview ON rowsec_tab FOR SELECT USING(true); -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( b), min(c) -from rowsec_tab -group by time_bucket('1', a) WITH NO DATA; -ERROR: cannot create continuous aggregate on hypertable with row security --- cagg on cagg not allowed -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -SELECT time_bucket('1 day', timec) AS bucket - FROM conditions -GROUP BY time_bucket('1 day', timec); -NOTICE: continuous aggregate "mat_m1" is already up-to-date -CREATE MATERIALIZED VIEW mat_m2_on_mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) -AS -SELECT time_bucket('1 week', bucket) AS bucket - FROM mat_m1 -GROUP BY time_bucket('1 week', bucket); -ERROR: old format of continuous aggregate is not supported -HINT: Run "CALL cagg_migrate('public.mat_m1');" to migrate to the new format. 
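-- Illustrative sketch, not part of the deleted expected output: the HINT
-- above shows the supported path now that old-format creation is removed.
-- cagg_migrate builds a new-format copy of the aggregate and migrates its
-- data; the override/drop_old parameter names below are assumptions about
-- its optional arguments.
CALL cagg_migrate('public.mat_m1');
-- Optionally replace the original view and drop the old one in a single call:
CALL cagg_migrate('public.mat_m1', override => true, drop_old => true);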
-drop table conditions cascade; -NOTICE: drop cascades to 3 other objects -DETAIL: drop cascades to view mat_m1 -drop cascades to view _timescaledb_internal._partial_view_3 -drop cascades to view _timescaledb_internal._direct_view_3 ---negative tests for WITH options -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); -select table_name from create_hypertable( 'conditions', 'timec'); - table_name ------------- - conditions -(1 row) - -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket('1day', timec) WITH NO DATA; -SELECT h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_with_test' -\gset -\set ON_ERROR_STOP 0 -ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'false'); -ERROR: cannot alter create_group_indexes option for continuous aggregates -ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'true'); -ERROR: cannot alter create_group_indexes option for continuous aggregates -ALTER MATERIALIZED VIEW mat_with_test ALTER timec DROP default; -ERROR: cannot alter only SET options of a continuous aggregate -\set ON_ERROR_STOP 1 -\set VERBOSITY terse -DROP TABLE conditions CASCADE; -NOTICE: drop cascades to 3 other objects ---test WITH using a hypertable with an integer time dimension -CREATE TABLE conditions ( - timec SMALLINT NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); -select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); - table_name ------------- - conditions -(1 row) - -CREATE OR REPLACE FUNCTION integer_now_test_s() returns smallint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::smallint FROM conditions $$; -SELECT set_integer_now_func('conditions', 'integer_now_test_s'); - set_integer_now_func ----------------------- - -(1 row) - -\set ON_ERROR_STOP 0 -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column -ALTER TABLE conditions ALTER timec type int; -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH 
(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; -\set ON_ERROR_STOP 1 -DROP TABLE conditions cascade; -NOTICE: drop cascades to 3 other objects -CREATE TABLE conditions ( - timec BIGINT NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); -select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); - table_name ------------- - conditions -(1 row) - -CREATE OR REPLACE FUNCTION integer_now_test_b() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::bigint FROM conditions $$; -SELECT set_integer_now_func('conditions', 'integer_now_test_b'); - set_integer_now_func ----------------------- - -(1 row) - -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(BIGINT '100', timec), min(location), sum(temperature),sum(humidity) -from conditions -group by 1 WITH NO DATA; --- custom time partition functions are not supported with invalidations -CREATE FUNCTION text_part_func(TEXT) RETURNS BIGINT - AS $$ SELECT length($1)::BIGINT $$ - LANGUAGE SQL IMMUTABLE; -CREATE TABLE text_time(time TEXT); - SELECT create_hypertable('text_time', 'time', chunk_time_interval => 10, time_partitioning_func => 'text_part_func'); -NOTICE: adding not-null constraint to column "time" - create_hypertable -------------------------- - (10,public,text_time,t) -(1 row) - -\set VERBOSITY default -\set ON_ERROR_STOP 0 -CREATE MATERIALIZED VIEW text_view - WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) - AS SELECT time_bucket('5', text_part_func(time)), COUNT(time) - FROM text_time - GROUP BY 1 WITH NO DATA; -ERROR: custom partitioning functions not supported with continuous aggregates -\set ON_ERROR_STOP 1 --- Check that we get an error when mixing normal materialized views --- and continuous aggregates. -CREATE MATERIALIZED VIEW normal_mat_view AS -SELECT time_bucket('5', text_part_func(time)), COUNT(time) - FROM text_time -GROUP BY 1 WITH NO DATA; -\set VERBOSITY terse -\set ON_ERROR_STOP 0 -DROP MATERIALIZED VIEW normal_mat_view, mat_with_test; -ERROR: mixing continuous aggregates and other objects not allowed -\set ON_ERROR_STOP 1 -DROP TABLE text_time CASCADE; -NOTICE: drop cascades to materialized view normal_mat_view -CREATE TABLE measurements (time TIMESTAMPTZ NOT NULL, device INT, value FLOAT); -SELECT create_hypertable('measurements', 'time'); - create_hypertable ----------------------------- - (11,public,measurements,t) -(1 row) - -INSERT INTO measurements VALUES ('2019-03-04 13:30', 1, 1.3); --- Add a continuous aggregate on the measurements table and a policy --- to be able to test error cases for the add_job function. 
-CREATE MATERIALIZED VIEW measurements_summary WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS -SELECT time_bucket('1 day', time), COUNT(time) - FROM measurements -GROUP BY 1 WITH NO DATA; -SELECT ca.mat_hypertable_id AS "MAT_HYPERTABLE_ID" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'measurements_summary' -\gset --- First test that add_job checks the config. It is currently possible --- to add non-custom jobs using the add_job function so we need to --- test that the function actually checks the config parameters. These --- should all generate errors, for different reasons... -\set ON_ERROR_STOP 0 --- ... this one because it is missing a field. -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => '{"end_offset": null, "start_offset": null}'); -ERROR: could not find "mat_hypertable_id" in config for job --- ... this one because it has a bad value for start_offset -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => ('{"end_offset": null, "start_offset": "1 fortnight", "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); -ERROR: invalid input syntax for type interval: "1 fortnight" --- ... this one because it has a bad value for end_offset -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => ('{"end_offset": "chicken", "start_offset": null, "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); -ERROR: invalid input syntax for type interval: "chicken" -\set ON_ERROR_STOP 1 -SELECT add_continuous_aggregate_policy('measurements_summary', NULL, NULL, '1 h'::interval) AS job_id -\gset -\x on -SELECT * FROM _timescaledb_config.bgw_job WHERE id = :job_id; --[ RECORD 1 ]-----+-------------------------------------------------------------------- -id | 1000 -application_name | Refresh Continuous Aggregate Policy [1000] -schedule_interval | @ 1 hour -max_runtime | @ 0 -max_retries | -1 -retry_period | @ 1 hour -proc_schema | _timescaledb_functions -proc_name | policy_refresh_continuous_aggregate -owner | default_perm_user -scheduled | t -fixed_schedule | f -initial_start | -hypertable_id | 12 -config | {"end_offset": null, "start_offset": null, "mat_hypertable_id": 12} -check_schema | _timescaledb_functions -check_name | policy_refresh_continuous_aggregate_check -timezone | - -\x off --- These are all weird values for the parameters for the continuous --- aggregate jobs and should generate an error. Since the config will --- be replaced, we will also generate error for missing arguments. 
-\set ON_ERROR_STOP 0 -SELECT alter_job(:job_id, config => '{"end_offset": "1 week", "start_offset": "2 fortnights"}'); -ERROR: could not find "mat_hypertable_id" in config for job -SELECT alter_job(:job_id, - config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 fortnights"}')::jsonb); -ERROR: invalid input syntax for type interval: "1 fortnights" -SELECT alter_job(:job_id, - config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 week"}')::jsonb); -ERROR: invalid input syntax for type interval: "chicken" -\set ON_ERROR_STOP 1 -DROP TABLE measurements CASCADE; -NOTICE: drop cascades to 3 other objects -DROP TABLE conditions CASCADE; -NOTICE: drop cascades to 3 other objects --- test handling of invalid mat_hypertable_id -create table i2980(time timestamptz not null); -select create_hypertable('i2980','time'); - create_hypertable ---------------------- - (13,public,i2980,t) -(1 row) - -create materialized view i2980_cagg with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; -NOTICE: continuous aggregate "i2980_cagg" is already up-to-date -select add_continuous_aggregate_policy('i2980_cagg',NULL,NULL,'4h') AS job_id \gset -\set ON_ERROR_STOP 0 -select alter_job(:job_id,config:='{"end_offset": null, "start_offset": null, "mat_hypertable_id": 1000}'); -ERROR: configuration materialization hypertable id 1000 not found ---test creating continuous aggregate with compression enabled -- -CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.compress, timescaledb.finalized = false) -AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; -ERROR: cannot enable compression while creating a continuous aggregate ---this one succeeds -CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS SELECT time_bucket('1h',time) as bucket, avg(7) FROM i2980 GROUP BY 1; -NOTICE: continuous aggregate "i2980_cagg2" is already up-to-date ---now enable compression with invalid parameters -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, -timescaledb.compress_segmentby = 'bucket'); -NOTICE: defaulting compress_orderby to bucket -ERROR: cannot use column "bucket" for both ordering and segmenting -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, -timescaledb.compress_orderby = 'bucket'); ---enable compression and test re-enabling compression -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress); -NOTICE: defaulting compress_orderby to bucket -insert into i2980 select now(); -call refresh_continuous_aggregate('i2980_cagg2', NULL, NULL); -SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; - compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_15_3_chunk -(1 row) - -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'false'); -ERROR: cannot change configuration on already compressed chunks -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'true'); -NOTICE: defaulting compress_orderby to bucket -ERROR: cannot change configuration on already compressed chunks -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, timescaledb.compress_segmentby = 'bucket'); -NOTICE: defaulting compress_orderby to bucket -ERROR: cannot change configuration on 
already compressed chunks ---Errors with compression policy on caggs-- -select add_continuous_aggregate_policy('i2980_cagg2', interval '10 day', interval '2 day' ,'4h') AS job_id ; - job_id --------- - 1002 -(1 row) - -SELECT add_compression_policy('i2980_cagg', '8 day'::interval); -ERROR: compression not enabled on continuous aggregate "i2980_cagg" -ALTER MATERIALIZED VIEW i2980_cagg SET ( timescaledb.compress ); -NOTICE: defaulting compress_orderby to time_bucket -SELECT add_compression_policy('i2980_cagg', '8 day'::interval); -ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg -SELECT add_continuous_aggregate_policy('i2980_cagg2', '10 day'::interval, '6 day'::interval); -ERROR: function add_continuous_aggregate_policy(unknown, interval, interval) does not exist at character 8 -SELECT add_compression_policy('i2980_cagg2', '3 day'::interval); -ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg2 -SELECT add_compression_policy('i2980_cagg2', '1 day'::interval); -ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg2 -SELECT add_compression_policy('i2980_cagg2', '3'::integer); -ERROR: unsupported compress_after argument type, expected type : interval -SELECT add_compression_policy('i2980_cagg2', 13::integer); -ERROR: unsupported compress_after argument type, expected type : interval -SELECT materialization_hypertable_schema || '.' || materialization_hypertable_name AS "MAT_TABLE_NAME" -FROM timescaledb_information.continuous_aggregates -WHERE view_name = 'i2980_cagg2' -\gset -SELECT add_compression_policy( :'MAT_TABLE_NAME', 13::integer); -ERROR: cannot add compression policy to materialized hypertable "_materialized_hypertable_15" ---TEST compressing cagg chunks without enabling compression -SELECT count(*) FROM (select decompress_chunk(ch) FROM show_chunks('i2980_cagg2') ch ) q; - count -------- - 1 -(1 row) - -ALTER MATERIALIZED VIEW i2980_cagg2 SET (timescaledb.compress = 'false'); -SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; -ERROR: compression not enabled on "i2980_cagg2" --- test error handling when trying to create cagg on internal hypertable -CREATE TABLE comp_ht_test(time timestamptz NOT NULL); -SELECT table_name FROM create_hypertable('comp_ht_test','time'); - table_name --------------- - comp_ht_test -(1 row) - -ALTER TABLE comp_ht_test SET (timescaledb.compress); -SELECT - format('%I.%I', ht.schema_name, ht.table_name) AS "INTERNALTABLE" -FROM - _timescaledb_catalog.hypertable ht - INNER JOIN _timescaledb_catalog.hypertable uncompress ON (ht.id = uncompress.compressed_hypertable_id - AND uncompress.table_name = 'comp_ht_test') \gset -CREATE MATERIALIZED VIEW cagg1 WITH(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',_ts_meta_min_1) FROM :INTERNALTABLE GROUP BY 1; -ERROR: hypertable is an internal compressed hypertable diff --git a/tsl/test/expected/cagg_errors_deprecated-14.out b/tsl/test/expected/cagg_errors_deprecated-14.out deleted file mode 100644 index d504e9687b4..00000000000 --- a/tsl/test/expected/cagg_errors_deprecated-14.out +++ /dev/null @@ -1,714 +0,0 @@ --- This file and its contents are licensed under the Timescale License. 
--- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. -\set ON_ERROR_STOP 0 -\set VERBOSITY default ---negative tests for query validation -create table mat_t1( a integer, b integer,c TEXT); -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature integer NULL, - humidity DOUBLE PRECISION NULL, - timemeasure TIMESTAMPTZ, - timeinterval INTERVAL - ); -select table_name from create_hypertable( 'conditions', 'timec'); - table_name ------------- - conditions -(1 row) - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, timescaledb.myfill = 1) -as -select location , min(temperature) -from conditions -group by time_bucket('1d', timec), location WITH NO DATA; -ERROR: unrecognized parameter "timescaledb.myfill" ---valid PG option -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, check_option = LOCAL ) -as -select * from conditions , mat_t1 WITH NO DATA; -ERROR: unsupported combination of storage parameters -DETAIL: A continuous aggregate does not support standard storage parameters. -HINT: Use only parameters with the "timescaledb." prefix when creating a continuous aggregate. ---non-hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select a, count(*) from mat_t1 -group by a WITH NO DATA; -ERROR: table "mat_t1" is not a hypertable --- no group by -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select count(*) from conditions WITH NO DATA; -ERROR: invalid continuous aggregate query -HINT: Include at least one aggregate function and a GROUP BY clause with time bucket. --- no time_bucket in group by -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select count(*) from conditions group by location WITH NO DATA; -ERROR: continuous aggregate view must include a valid time bucket function --- with valid query in a CTE -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -with m1 as ( -Select location, count(*) from conditions - group by time_bucket('1week', timec) , location) -select * from m1 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: CTEs, subqueries and set-returning functions are not supported by continuous aggregates. ---with DISTINCT ON -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as - select distinct on ( location ) count(*) from conditions group by location, time_bucket('1week', timec) WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: DISTINCT / DISTINCT ON queries are not supported by continuous aggregates. 
---aggregate with DISTINCT -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), - count(location) , sum(distinct temperature) from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported ---aggregate with FILTER -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), - sum(temperature) filter ( where humidity > 20 ) from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported --- aggregate with filter in having clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), max(temperature) -from conditions - group by time_bucket('1week', timec) , location - having sum(temperature) filter ( where humidity > 20 ) > 50 WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported --- time_bucket on non partitioning column of hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timemeasure) , location WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column ---time_bucket on expression -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timec+ '10 minutes'::interval) , location WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column ---multiple time_bucket functions -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timec) , time_bucket('1month', timec), location WITH NO DATA; -ERROR: continuous aggregate view cannot contain multiple time bucket functions ---time_bucket using additional args -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket( INTERVAL '5 minutes', timec, INTERVAL '-2.5 minutes') , location WITH NO DATA; -ERROR: continuous aggregate view must include a valid time bucket function ---time_bucket using non-const for first argument -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket( timeinterval, timec) , location WITH NO DATA; -ERROR: only immutable expressions allowed in time bucket function -HINT: Use an immutable expression as first argument to the time bucket function. 
--- ordered set aggr -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select mode() within group( order by humidity) -from conditions - group by time_bucket('1week', timec) WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported ---window function -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select avg(temperature) over( order by humidity) -from conditions - WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: Window functions are not supported by continuous aggregates. ---aggregate without combine function -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select json_agg(location) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates which are not parallelizable are not supported -; --- Starting on PG16 this test will pass because array_agg is parallel safe --- https://github.com/postgres/postgres/commit/16fd03e956540d1b47b743f6a84f37c54ac93dd4 -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature), array_agg(location) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates which are not parallelizable are not supported -; --- user-defined aggregate without combine function -CREATE AGGREGATE newavg ( - sfunc = int4_avg_accum, basetype = int4, stype = _int8, - finalfunc = int8_avg, - initcond1 = '{0,0}' -); -DROP MATERIALIZED VIEW IF EXISTS mat_m1; -NOTICE: materialized view "mat_m1" does not exist, skipping -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), newavg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates which are not parallelizable are not supported -; --- using subqueries -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from -( select humidity, temperature, location, timec -from conditions ) q - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: invalid continuous aggregate view -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -select * from -( Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location ) q WITH NO DATA; -ERROR: invalid continuous aggregate query -HINT: Include at least one aggregate function and a GROUP BY clause with time bucket. ---using limit /limit offset -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -limit 10 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. -HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead.
-CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -offset 10 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. -HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. ---using ORDER BY in view definition -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -ORDER BY 1 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: ORDER BY is not supported in queries defining continuous aggregates. -HINT: Use ORDER BY clauses in SELECTS from the continuous aggregate view instead. ---using FETCH -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -fetch first 10 rows only WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. -HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. ---using locking clauses FOR clause ---all should be disabled. we cannot guarantee locks on the hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR KEY SHARE WITH NO DATA; -ERROR: FOR KEY SHARE is not allowed with GROUP BY clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR SHARE WITH NO DATA; -ERROR: FOR SHARE is not allowed with GROUP BY clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR UPDATE WITH NO DATA; -ERROR: FOR UPDATE is not allowed with GROUP BY clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR NO KEY UPDATE WITH NO DATA; -ERROR: FOR NO KEY UPDATE is not allowed with GROUP BY clause ---tablesample clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions tablesample bernoulli(0.2) - group by time_bucket('1week', timec) , location - WITH NO DATA; -ERROR: invalid continuous aggregate view --- ONLY in from clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from ONLY
conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: invalid continuous aggregate view ---grouping sets and variants -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by grouping sets(time_bucket('1week', timec) , location ) WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: GROUP BY GROUPING SETS, ROLLUP and CUBE are not supported by continuous aggregates -HINT: Define multiple continuous aggregates with different grouping levels. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions -group by rollup(time_bucket('1week', timec) , location ) WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: GROUP BY GROUPING SETS, ROLLUP and CUBE are not supported by continuous aggregates -HINT: Define multiple continuous aggregates with different grouping levels. ---NO immutable functions -- check all clauses -CREATE FUNCTION test_stablefunc(int) RETURNS int LANGUAGE 'sql' - STABLE AS 'SELECT $1 + 10'; -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), max(timec + INTERVAL '1h') -from conditions -group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), min(location) -from conditions -group by time_bucket('1week', timec) -having max(timec + INTERVAL '1h') > '2010-01-01 09:00:00-08' WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( test_stablefunc(humidity::int) ), min(location) -from conditions -group by time_bucket('1week', timec) WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( temperature ), min(location) -from conditions -group by time_bucket('1week', timec), test_stablefunc(humidity::int) WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. 
--- Should use CREATE MATERIALIZED VIEW to create continuous aggregates -CREATE VIEW continuous_aggs_errors_tbl1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS -SELECT time_bucket('1 week', timec) - FROM conditions -GROUP BY time_bucket('1 week', timec); -ERROR: cannot create continuous aggregate with CREATE VIEW -HINT: Use CREATE MATERIALIZED VIEW to create a continuous aggregate. --- row security on table -create table rowsec_tab( a bigint, b integer, c integer); -select table_name from create_hypertable( 'rowsec_tab', 'a', chunk_time_interval=>10); -NOTICE: adding not-null constraint to column "a" -DETAIL: Dimensions cannot have NULL values. - table_name ------------- - rowsec_tab -(1 row) - -CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a), 0)::bigint FROM rowsec_tab $$; -SELECT set_integer_now_func('rowsec_tab', 'integer_now_test'); - set_integer_now_func ----------------------- - -(1 row) - -alter table rowsec_tab ENABLE ROW LEVEL SECURITY; -create policy rowsec_tab_allview ON rowsec_tab FOR SELECT USING(true); -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( b), min(c) -from rowsec_tab -group by time_bucket('1', a) WITH NO DATA; -ERROR: cannot create continuous aggregate on hypertable with row security --- cagg on cagg not allowed -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -SELECT time_bucket('1 day', timec) AS bucket - FROM conditions -GROUP BY time_bucket('1 day', timec); -NOTICE: continuous aggregate "mat_m1" is already up-to-date -CREATE MATERIALIZED VIEW mat_m2_on_mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) -AS -SELECT time_bucket('1 week', bucket) AS bucket - FROM mat_m1 -GROUP BY time_bucket('1 week', bucket); -ERROR: old format of continuous aggregate is not supported -HINT: Run "CALL cagg_migrate('public.mat_m1');" to migrate to the new format. 
-drop table conditions cascade; -NOTICE: drop cascades to 3 other objects -DETAIL: drop cascades to view mat_m1 -drop cascades to view _timescaledb_internal._partial_view_3 -drop cascades to view _timescaledb_internal._direct_view_3 ---negative tests for WITH options -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); -select table_name from create_hypertable( 'conditions', 'timec'); - table_name ------------- - conditions -(1 row) - -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket('1day', timec) WITH NO DATA; -SELECT h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_with_test' -\gset -\set ON_ERROR_STOP 0 -ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'false'); -ERROR: cannot alter create_group_indexes option for continuous aggregates -ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'true'); -ERROR: cannot alter create_group_indexes option for continuous aggregates -ALTER MATERIALIZED VIEW mat_with_test ALTER timec DROP default; -ERROR: cannot alter only SET options of a continuous aggregate -\set ON_ERROR_STOP 1 -\set VERBOSITY terse -DROP TABLE conditions CASCADE; -NOTICE: drop cascades to 3 other objects ---test WITH using a hypertable with an integer time dimension -CREATE TABLE conditions ( - timec SMALLINT NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); -select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); - table_name ------------- - conditions -(1 row) - -CREATE OR REPLACE FUNCTION integer_now_test_s() returns smallint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::smallint FROM conditions $$; -SELECT set_integer_now_func('conditions', 'integer_now_test_s'); - set_integer_now_func ----------------------- - -(1 row) - -\set ON_ERROR_STOP 0 -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column -ALTER TABLE conditions ALTER timec type int; -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH 
(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; -\set ON_ERROR_STOP 1 -DROP TABLE conditions cascade; -NOTICE: drop cascades to 3 other objects -CREATE TABLE conditions ( - timec BIGINT NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); -select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); - table_name ------------- - conditions -(1 row) - -CREATE OR REPLACE FUNCTION integer_now_test_b() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::bigint FROM conditions $$; -SELECT set_integer_now_func('conditions', 'integer_now_test_b'); - set_integer_now_func ----------------------- - -(1 row) - -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(BIGINT '100', timec), min(location), sum(temperature),sum(humidity) -from conditions -group by 1 WITH NO DATA; --- custom time partition functions are not supported with invalidations -CREATE FUNCTION text_part_func(TEXT) RETURNS BIGINT - AS $$ SELECT length($1)::BIGINT $$ - LANGUAGE SQL IMMUTABLE; -CREATE TABLE text_time(time TEXT); - SELECT create_hypertable('text_time', 'time', chunk_time_interval => 10, time_partitioning_func => 'text_part_func'); -NOTICE: adding not-null constraint to column "time" - create_hypertable -------------------------- - (10,public,text_time,t) -(1 row) - -\set VERBOSITY default -\set ON_ERROR_STOP 0 -CREATE MATERIALIZED VIEW text_view - WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) - AS SELECT time_bucket('5', text_part_func(time)), COUNT(time) - FROM text_time - GROUP BY 1 WITH NO DATA; -ERROR: custom partitioning functions not supported with continuous aggregates -\set ON_ERROR_STOP 1 --- Check that we get an error when mixing normal materialized views --- and continuous aggregates. -CREATE MATERIALIZED VIEW normal_mat_view AS -SELECT time_bucket('5', text_part_func(time)), COUNT(time) - FROM text_time -GROUP BY 1 WITH NO DATA; -\set VERBOSITY terse -\set ON_ERROR_STOP 0 -DROP MATERIALIZED VIEW normal_mat_view, mat_with_test; -ERROR: mixing continuous aggregates and other objects not allowed -\set ON_ERROR_STOP 1 -DROP TABLE text_time CASCADE; -NOTICE: drop cascades to materialized view normal_mat_view -CREATE TABLE measurements (time TIMESTAMPTZ NOT NULL, device INT, value FLOAT); -SELECT create_hypertable('measurements', 'time'); - create_hypertable ----------------------------- - (11,public,measurements,t) -(1 row) - -INSERT INTO measurements VALUES ('2019-03-04 13:30', 1, 1.3); --- Add a continuous aggregate on the measurements table and a policy --- to be able to test error cases for the add_job function. 
-CREATE MATERIALIZED VIEW measurements_summary WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS -SELECT time_bucket('1 day', time), COUNT(time) - FROM measurements -GROUP BY 1 WITH NO DATA; -SELECT ca.mat_hypertable_id AS "MAT_HYPERTABLE_ID" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'measurements_summary' -\gset --- First test that add_job checks the config. It is currently possible --- to add non-custom jobs using the add_job function so we need to --- test that the function actually checks the config parameters. These --- should all generate errors, for different reasons... -\set ON_ERROR_STOP 0 --- ... this one because it is missing a field. -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => '{"end_offset": null, "start_offset": null}'); -ERROR: could not find "mat_hypertable_id" in config for job --- ... this one because it has a bad value for start_offset -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => ('{"end_offset": null, "start_offset": "1 fortnight", "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); -ERROR: invalid input syntax for type interval: "1 fortnight" --- ... this one because it has a bad value for end_offset -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => ('{"end_offset": "chicken", "start_offset": null, "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); -ERROR: invalid input syntax for type interval: "chicken" -\set ON_ERROR_STOP 1 -SELECT add_continuous_aggregate_policy('measurements_summary', NULL, NULL, '1 h'::interval) AS job_id -\gset -\x on -SELECT * FROM _timescaledb_config.bgw_job WHERE id = :job_id; --[ RECORD 1 ]-----+-------------------------------------------------------------------- -id | 1000 -application_name | Refresh Continuous Aggregate Policy [1000] -schedule_interval | @ 1 hour -max_runtime | @ 0 -max_retries | -1 -retry_period | @ 1 hour -proc_schema | _timescaledb_functions -proc_name | policy_refresh_continuous_aggregate -owner | default_perm_user -scheduled | t -fixed_schedule | f -initial_start | -hypertable_id | 12 -config | {"end_offset": null, "start_offset": null, "mat_hypertable_id": 12} -check_schema | _timescaledb_functions -check_name | policy_refresh_continuous_aggregate_check -timezone | - -\x off --- These are all weird values for the parameters for the continuous --- aggregate jobs and should generate an error. Since the config will --- be replaced, we will also generate error for missing arguments. 
-\set ON_ERROR_STOP 0 -SELECT alter_job(:job_id, config => '{"end_offset": "1 week", "start_offset": "2 fortnights"}'); -ERROR: could not find "mat_hypertable_id" in config for job -SELECT alter_job(:job_id, - config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 fortnights"}')::jsonb); -ERROR: invalid input syntax for type interval: "1 fortnights" -SELECT alter_job(:job_id, - config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 week"}')::jsonb); -ERROR: invalid input syntax for type interval: "chicken" -\set ON_ERROR_STOP 1 -DROP TABLE measurements CASCADE; -NOTICE: drop cascades to 3 other objects -DROP TABLE conditions CASCADE; -NOTICE: drop cascades to 3 other objects --- test handling of invalid mat_hypertable_id -create table i2980(time timestamptz not null); -select create_hypertable('i2980','time'); - create_hypertable ---------------------- - (13,public,i2980,t) -(1 row) - -create materialized view i2980_cagg with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; -NOTICE: continuous aggregate "i2980_cagg" is already up-to-date -select add_continuous_aggregate_policy('i2980_cagg',NULL,NULL,'4h') AS job_id \gset -\set ON_ERROR_STOP 0 -select alter_job(:job_id,config:='{"end_offset": null, "start_offset": null, "mat_hypertable_id": 1000}'); -ERROR: configuration materialization hypertable id 1000 not found ---test creating continuous aggregate with compression enabled -- -CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.compress, timescaledb.finalized = false) -AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; -ERROR: cannot enable compression while creating a continuous aggregate ---this one succeeds -CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS SELECT time_bucket('1h',time) as bucket, avg(7) FROM i2980 GROUP BY 1; -NOTICE: continuous aggregate "i2980_cagg2" is already up-to-date ---now enable compression with invalid parameters -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, -timescaledb.compress_segmentby = 'bucket'); -NOTICE: defaulting compress_orderby to bucket -ERROR: cannot use column "bucket" for both ordering and segmenting -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, -timescaledb.compress_orderby = 'bucket'); ---enable compression and test re-enabling compression -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress); -NOTICE: defaulting compress_orderby to bucket -insert into i2980 select now(); -call refresh_continuous_aggregate('i2980_cagg2', NULL, NULL); -SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; - compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_15_3_chunk -(1 row) - -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'false'); -ERROR: cannot change configuration on already compressed chunks -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'true'); -NOTICE: defaulting compress_orderby to bucket -ERROR: cannot change configuration on already compressed chunks -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, timescaledb.compress_segmentby = 'bucket'); -NOTICE: defaulting compress_orderby to bucket -ERROR: cannot change configuration on 
already compressed chunks ---Errors with compression policy on caggs-- -select add_continuous_aggregate_policy('i2980_cagg2', interval '10 day', interval '2 day' ,'4h') AS job_id ; - job_id --------- - 1002 -(1 row) - -SELECT add_compression_policy('i2980_cagg', '8 day'::interval); -ERROR: compression not enabled on continuous aggregate "i2980_cagg" -ALTER MATERIALIZED VIEW i2980_cagg SET ( timescaledb.compress ); -NOTICE: defaulting compress_orderby to time_bucket -SELECT add_compression_policy('i2980_cagg', '8 day'::interval); -ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg -SELECT add_continuous_aggregate_policy('i2980_cagg2', '10 day'::interval, '6 day'::interval); -ERROR: function add_continuous_aggregate_policy(unknown, interval, interval) does not exist at character 8 -SELECT add_compression_policy('i2980_cagg2', '3 day'::interval); -ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg2 -SELECT add_compression_policy('i2980_cagg2', '1 day'::interval); -ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg2 -SELECT add_compression_policy('i2980_cagg2', '3'::integer); -ERROR: unsupported compress_after argument type, expected type : interval -SELECT add_compression_policy('i2980_cagg2', 13::integer); -ERROR: unsupported compress_after argument type, expected type : interval -SELECT materialization_hypertable_schema || '.' || materialization_hypertable_name AS "MAT_TABLE_NAME" -FROM timescaledb_information.continuous_aggregates -WHERE view_name = 'i2980_cagg2' -\gset -SELECT add_compression_policy( :'MAT_TABLE_NAME', 13::integer); -ERROR: cannot add compression policy to materialized hypertable "_materialized_hypertable_15" ---TEST compressing cagg chunks without enabling compression -SELECT count(*) FROM (select decompress_chunk(ch) FROM show_chunks('i2980_cagg2') ch ) q; - count -------- - 1 -(1 row) - -ALTER MATERIALIZED VIEW i2980_cagg2 SET (timescaledb.compress = 'false'); -SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; -ERROR: compression not enabled on "i2980_cagg2" --- test error handling when trying to create cagg on internal hypertable -CREATE TABLE comp_ht_test(time timestamptz NOT NULL); -SELECT table_name FROM create_hypertable('comp_ht_test','time'); - table_name --------------- - comp_ht_test -(1 row) - -ALTER TABLE comp_ht_test SET (timescaledb.compress); -SELECT - format('%I.%I', ht.schema_name, ht.table_name) AS "INTERNALTABLE" -FROM - _timescaledb_catalog.hypertable ht - INNER JOIN _timescaledb_catalog.hypertable uncompress ON (ht.id = uncompress.compressed_hypertable_id - AND uncompress.table_name = 'comp_ht_test') \gset -CREATE MATERIALIZED VIEW cagg1 WITH(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',_ts_meta_min_1) FROM :INTERNALTABLE GROUP BY 1; -ERROR: hypertable is an internal compressed hypertable diff --git a/tsl/test/expected/cagg_errors_deprecated-15.out b/tsl/test/expected/cagg_errors_deprecated-15.out deleted file mode 100644 index d504e9687b4..00000000000 --- a/tsl/test/expected/cagg_errors_deprecated-15.out +++ /dev/null @@ -1,714 +0,0 @@ --- This file and its contents are licensed under the Timescale License. 
--- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. -\set ON_ERROR_STOP 0 -\set VERBOSITY default ---negative tests for query validation -create table mat_t1( a integer, b integer,c TEXT); -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature integer NULL, - humidity DOUBLE PRECISION NULL, - timemeasure TIMESTAMPTZ, - timeinterval INTERVAL - ); -select table_name from create_hypertable( 'conditions', 'timec'); - table_name ------------- - conditions -(1 row) - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, timescaledb.myfill = 1) -as -select location , min(temperature) -from conditions -group by time_bucket('1d', timec), location WITH NO DATA; -ERROR: unrecognized parameter "timescaledb.myfill" ---valid PG option -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, check_option = LOCAL ) -as -select * from conditions , mat_t1 WITH NO DATA; -ERROR: unsupported combination of storage parameters -DETAIL: A continuous aggregate does not support standard storage parameters. -HINT: Use only parameters with the "timescaledb." prefix when creating a continuous aggregate. ---non-hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select a, count(*) from mat_t1 -group by a WITH NO DATA; -ERROR: table "mat_t1" is not a hypertable --- no group by -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select count(*) from conditions WITH NO DATA; -ERROR: invalid continuous aggregate query -HINT: Include at least one aggregate function and a GROUP BY clause with time bucket. --- no time_bucket in group by -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select count(*) from conditions group by location WITH NO DATA; -ERROR: continuous aggregate view must include a valid time bucket function --- with valid query in a CTE -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -with m1 as ( -Select location, count(*) from conditions - group by time_bucket('1week', timec) , location) -select * from m1 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: CTEs, subqueries and set-returning functions are not supported by continuous aggregates. ---with DISTINCT ON -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as - select distinct on ( location ) count(*) from conditions group by location, time_bucket('1week', timec) WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: DISTINCT / DISTINCT ON queries are not supported by continuous aggregates. 
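For contrast with the rejections above, the query shape these deprecated tests did accept is narrow: plain aggregates plus a GROUP BY containing one time_bucket on the hypertable's time dimension. A minimal sketch against the same conditions table (the view name is illustrative, not part of the suite):

CREATE MATERIALIZED VIEW conditions_weekly
WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false)
AS
SELECT time_bucket('1week', timec) AS bucket, location, min(temperature)
FROM conditions
GROUP BY time_bucket('1week', timec), location WITH NO DATA;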
---aggregate with DISTINCT -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), - count(location) , sum(distinct temperature) from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported ---aggregate with FILTER -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), - sum(temperature) filter ( where humidity > 20 ) from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported --- aggregate with filter in having clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), max(temperature) -from conditions - group by time_bucket('1week', timec) , location - having sum(temperature) filter ( where humidity > 20 ) > 50 WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported --- time_bucket on non partitioning column of hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timemeasure) , location WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column ---time_bucket on expression -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timec+ '10 minutes'::interval) , location WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column ---multiple time_bucket functions -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timec) , time_bucket('1month', timec), location WITH NO DATA; -ERROR: continuous aggregate view cannot contain multiple time bucket functions ---time_bucket using additional args -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket( INTERVAL '5 minutes', timec, INTERVAL '-2.5 minutes') , location WITH NO DATA; -ERROR: continuous aggregate view must include a valid time bucket function ---time_bucket using non-const for first argument -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket( timeinterval, timec) , location WITH NO DATA; -ERROR: only immutable expressions allowed in time bucket function -HINT: Use an immutable expression as first argument to the time bucket function. 
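Taken together, the bucket-related failures above pin down the accepted form: a constant bucket width as the first argument, the bare partitioning column as the second, and exactly one time_bucket call in the GROUP BY. Under those assumptions, the accepted variant of the rejected queries would be:

SELECT max(temperature)
FROM conditions
GROUP BY time_bucket('1week', timec), location;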
--- ordered set aggregate -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select mode() within group( order by humidity) -from conditions - group by time_bucket('1week', timec) WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported ---window function -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select avg(temperature) over( order by humidity) -from conditions - WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: Window functions are not supported by continuous aggregates. ---aggregate without combine function -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select json_agg(location) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates which are not parallelizable are not supported -; --- Starting on PG16 this test will pass because array_agg is parallel safe --- https://github.com/postgres/postgres/commit/16fd03e956540d1b47b743f6a84f37c54ac93dd4 -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature), array_agg(location) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates which are not parallelizable are not supported -; --- user-defined aggregate without combine function -CREATE AGGREGATE newavg ( - sfunc = int4_avg_accum, basetype = int4, stype = _int8, - finalfunc = int8_avg, - initcond1 = '{0,0}' -); -DROP MATERIALIZED VIEW IF EXISTS mat_m1; -NOTICE: materialized view "mat_m1" does not exist, skipping -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), newavg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates which are not parallelizable are not supported -; --- using subqueries -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from -( select humidity, temperature, location, timec -from conditions ) q - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: invalid continuous aggregate view -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -select * from -( Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location ) q WITH NO DATA; -ERROR: invalid continuous aggregate query -HINT: Include at least one aggregate function and a GROUP BY clause with time bucket. ---using limit /limit offset -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -limit 10 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. -HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead.
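The hint above generalizes to the rest of this block: presentation clauses (LIMIT, OFFSET, ORDER BY, FETCH) belong in queries over the aggregate, not in its definition. For example, assuming the illustrative conditions_weekly sketch from earlier:

SELECT * FROM conditions_weekly ORDER BY bucket DESC LIMIT 10;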
-CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -offset 10 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. -HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. ---using ORDER BY in view definition -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -ORDER BY 1 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: ORDER BY is not supported in queries defining continuous aggregates. -HINT: Use ORDER BY clauses in SELECTS from the continuous aggregate view instead. ---using FETCH -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -fetch first 10 rows only WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. -HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. ---using locking clauses FOR clause ---all should be disabled. we cannot guarantee locks on the hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR KEY SHARE WITH NO DATA; -ERROR: FOR KEY SHARE is not allowed with GROUP BY clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR SHARE WITH NO DATA; -ERROR: FOR SHARE is not allowed with GROUP BY clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR UPDATE WITH NO DATA; -ERROR: FOR UPDATE is not allowed with GROUP BY clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR NO KEY UPDATE WITH NO DATA; -ERROR: FOR NO KEY UPDATE is not allowed with GROUP BY clause ---tablesample clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions tablesample bernoulli(0.2) - group by time_bucket('1week', timec) , location - WITH NO DATA; -ERROR: invalid continuous aggregate view --- ONLY in from clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from ONLY
conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: invalid continuous aggregate view ---grouping sets and variants -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by grouping sets(time_bucket('1week', timec) , location ) WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: GROUP BY GROUPING SETS, ROLLUP and CUBE are not supported by continuous aggregates -HINT: Define multiple continuous aggregates with different grouping levels. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions -group by rollup(time_bucket('1week', timec) , location ) WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: GROUP BY GROUPING SETS, ROLLUP and CUBE are not supported by continuous aggregates -HINT: Define multiple continuous aggregates with different grouping levels. ---NO immutable functions -- check all clauses -CREATE FUNCTION test_stablefunc(int) RETURNS int LANGUAGE 'sql' - STABLE AS 'SELECT $1 + 10'; -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), max(timec + INTERVAL '1h') -from conditions -group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), min(location) -from conditions -group by time_bucket('1week', timec) -having max(timec + INTERVAL '1h') > '2010-01-01 09:00:00-08' WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( test_stablefunc(humidity::int) ), min(location) -from conditions -group by time_bucket('1week', timec) WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( temperature ), min(location) -from conditions -group by time_bucket('1week', timec), test_stablefunc(humidity::int) WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. 
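The volatility caveat in these hints can be checked directly in the catalogs: timestamptz + interval resolves to a STABLE function (its result depends on the session time zone), which is why max(timec + INTERVAL '1h') is rejected here, while the same expression over a plain timestamp would be immutable. A sketch of the check:

SELECT p.proname, p.provolatile  -- 'i' = immutable, 's' = stable, 'v' = volatile
FROM pg_operator o
JOIN pg_proc p ON p.oid = o.oprcode
WHERE o.oprname = '+'
  AND o.oprleft = 'timestamptz'::regtype
  AND o.oprright = 'interval'::regtype;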
--- Should use CREATE MATERIALIZED VIEW to create continuous aggregates -CREATE VIEW continuous_aggs_errors_tbl1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS -SELECT time_bucket('1 week', timec) - FROM conditions -GROUP BY time_bucket('1 week', timec); -ERROR: cannot create continuous aggregate with CREATE VIEW -HINT: Use CREATE MATERIALIZED VIEW to create a continuous aggregate. --- row security on table -create table rowsec_tab( a bigint, b integer, c integer); -select table_name from create_hypertable( 'rowsec_tab', 'a', chunk_time_interval=>10); -NOTICE: adding not-null constraint to column "a" -DETAIL: Dimensions cannot have NULL values. - table_name ------------- - rowsec_tab -(1 row) - -CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a), 0)::bigint FROM rowsec_tab $$; -SELECT set_integer_now_func('rowsec_tab', 'integer_now_test'); - set_integer_now_func ----------------------- - -(1 row) - -alter table rowsec_tab ENABLE ROW LEVEL SECURITY; -create policy rowsec_tab_allview ON rowsec_tab FOR SELECT USING(true); -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( b), min(c) -from rowsec_tab -group by time_bucket('1', a) WITH NO DATA; -ERROR: cannot create continuous aggregate on hypertable with row security --- cagg on cagg not allowed -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -SELECT time_bucket('1 day', timec) AS bucket - FROM conditions -GROUP BY time_bucket('1 day', timec); -NOTICE: continuous aggregate "mat_m1" is already up-to-date -CREATE MATERIALIZED VIEW mat_m2_on_mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) -AS -SELECT time_bucket('1 week', bucket) AS bucket - FROM mat_m1 -GROUP BY time_bucket('1 week', bucket); -ERROR: old format of continuous aggregate is not supported -HINT: Run "CALL cagg_migrate('public.mat_m1');" to migrate to the new format. 
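The hint above is the crux of this change: once the old format can no longer be created, a finalized = false aggregate such as mat_m1 has to be migrated before anything can be layered on top of it. A sketch of the migration (the optional arguments are an assumption about recent releases; check the signature shipped with your version):

CALL cagg_migrate('public.mat_m1');
-- Recent releases also accept options along the lines of
-- CALL cagg_migrate('public.mat_m1', override => true, drop_old => true);  -- assumed signature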
-drop table conditions cascade; -NOTICE: drop cascades to 3 other objects -DETAIL: drop cascades to view mat_m1 -drop cascades to view _timescaledb_internal._partial_view_3 -drop cascades to view _timescaledb_internal._direct_view_3 ---negative tests for WITH options -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); -select table_name from create_hypertable( 'conditions', 'timec'); - table_name ------------- - conditions -(1 row) - -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket('1day', timec) WITH NO DATA; -SELECT h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_with_test' -\gset -\set ON_ERROR_STOP 0 -ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'false'); -ERROR: cannot alter create_group_indexes option for continuous aggregates -ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'true'); -ERROR: cannot alter create_group_indexes option for continuous aggregates -ALTER MATERIALIZED VIEW mat_with_test ALTER timec DROP default; -ERROR: cannot alter only SET options of a continuous aggregate -\set ON_ERROR_STOP 1 -\set VERBOSITY terse -DROP TABLE conditions CASCADE; -NOTICE: drop cascades to 3 other objects ---test WITH using a hypertable with an integer time dimension -CREATE TABLE conditions ( - timec SMALLINT NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); -select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); - table_name ------------- - conditions -(1 row) - -CREATE OR REPLACE FUNCTION integer_now_test_s() returns smallint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::smallint FROM conditions $$; -SELECT set_integer_now_func('conditions', 'integer_now_test_s'); - set_integer_now_func ----------------------- - -(1 row) - -\set ON_ERROR_STOP 0 -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column -ALTER TABLE conditions ALTER timec type int; -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH 
(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; -\set ON_ERROR_STOP 1 -DROP TABLE conditions cascade; -NOTICE: drop cascades to 3 other objects -CREATE TABLE conditions ( - timec BIGINT NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); -select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); - table_name ------------- - conditions -(1 row) - -CREATE OR REPLACE FUNCTION integer_now_test_b() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::bigint FROM conditions $$; -SELECT set_integer_now_func('conditions', 'integer_now_test_b'); - set_integer_now_func ----------------------- - -(1 row) - -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(BIGINT '100', timec), min(location), sum(temperature),sum(humidity) -from conditions -group by 1 WITH NO DATA; --- custom time partition functions are not supported with invalidations -CREATE FUNCTION text_part_func(TEXT) RETURNS BIGINT - AS $$ SELECT length($1)::BIGINT $$ - LANGUAGE SQL IMMUTABLE; -CREATE TABLE text_time(time TEXT); - SELECT create_hypertable('text_time', 'time', chunk_time_interval => 10, time_partitioning_func => 'text_part_func'); -NOTICE: adding not-null constraint to column "time" - create_hypertable -------------------------- - (10,public,text_time,t) -(1 row) - -\set VERBOSITY default -\set ON_ERROR_STOP 0 -CREATE MATERIALIZED VIEW text_view - WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) - AS SELECT time_bucket('5', text_part_func(time)), COUNT(time) - FROM text_time - GROUP BY 1 WITH NO DATA; -ERROR: custom partitioning functions not supported with continuous aggregates -\set ON_ERROR_STOP 1 --- Check that we get an error when mixing normal materialized views --- and continuous aggregates. -CREATE MATERIALIZED VIEW normal_mat_view AS -SELECT time_bucket('5', text_part_func(time)), COUNT(time) - FROM text_time -GROUP BY 1 WITH NO DATA; -\set VERBOSITY terse -\set ON_ERROR_STOP 0 -DROP MATERIALIZED VIEW normal_mat_view, mat_with_test; -ERROR: mixing continuous aggregates and other objects not allowed -\set ON_ERROR_STOP 1 -DROP TABLE text_time CASCADE; -NOTICE: drop cascades to materialized view normal_mat_view -CREATE TABLE measurements (time TIMESTAMPTZ NOT NULL, device INT, value FLOAT); -SELECT create_hypertable('measurements', 'time'); - create_hypertable ----------------------------- - (11,public,measurements,t) -(1 row) - -INSERT INTO measurements VALUES ('2019-03-04 13:30', 1, 1.3); --- Add a continuous aggregate on the measurements table and a policy --- to be able to test error cases for the add_job function. 
-CREATE MATERIALIZED VIEW measurements_summary WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS -SELECT time_bucket('1 day', time), COUNT(time) - FROM measurements -GROUP BY 1 WITH NO DATA; -SELECT ca.mat_hypertable_id AS "MAT_HYPERTABLE_ID" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'measurements_summary' -\gset --- First test that add_job checks the config. It is currently possible --- to add non-custom jobs using the add_job function so we need to --- test that the function actually checks the config parameters. These --- should all generate errors, for different reasons... -\set ON_ERROR_STOP 0 --- ... this one because it is missing a field. -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => '{"end_offset": null, "start_offset": null}'); -ERROR: could not find "mat_hypertable_id" in config for job --- ... this one because it has a bad value for start_offset -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => ('{"end_offset": null, "start_offset": "1 fortnight", "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); -ERROR: invalid input syntax for type interval: "1 fortnight" --- ... this one because it has a bad value for end_offset -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => ('{"end_offset": "chicken", "start_offset": null, "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); -ERROR: invalid input syntax for type interval: "chicken" -\set ON_ERROR_STOP 1 -SELECT add_continuous_aggregate_policy('measurements_summary', NULL, NULL, '1 h'::interval) AS job_id -\gset -\x on -SELECT * FROM _timescaledb_config.bgw_job WHERE id = :job_id; --[ RECORD 1 ]-----+-------------------------------------------------------------------- -id | 1000 -application_name | Refresh Continuous Aggregate Policy [1000] -schedule_interval | @ 1 hour -max_runtime | @ 0 -max_retries | -1 -retry_period | @ 1 hour -proc_schema | _timescaledb_functions -proc_name | policy_refresh_continuous_aggregate -owner | default_perm_user -scheduled | t -fixed_schedule | f -initial_start | -hypertable_id | 12 -config | {"end_offset": null, "start_offset": null, "mat_hypertable_id": 12} -check_schema | _timescaledb_functions -check_name | policy_refresh_continuous_aggregate_check -timezone | - -\x off --- These are all weird values for the parameters for the continuous --- aggregate jobs and should generate an error. Since the config will --- be replaced, we will also generate error for missing arguments. 
-\set ON_ERROR_STOP 0 -SELECT alter_job(:job_id, config => '{"end_offset": "1 week", "start_offset": "2 fortnights"}'); -ERROR: could not find "mat_hypertable_id" in config for job -SELECT alter_job(:job_id, - config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 fortnights"}')::jsonb); -ERROR: invalid input syntax for type interval: "1 fortnights" -SELECT alter_job(:job_id, - config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 week"}')::jsonb); -ERROR: invalid input syntax for type interval: "chicken" -\set ON_ERROR_STOP 1 -DROP TABLE measurements CASCADE; -NOTICE: drop cascades to 3 other objects -DROP TABLE conditions CASCADE; -NOTICE: drop cascades to 3 other objects --- test handling of invalid mat_hypertable_id -create table i2980(time timestamptz not null); -select create_hypertable('i2980','time'); - create_hypertable ---------------------- - (13,public,i2980,t) -(1 row) - -create materialized view i2980_cagg with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; -NOTICE: continuous aggregate "i2980_cagg" is already up-to-date -select add_continuous_aggregate_policy('i2980_cagg',NULL,NULL,'4h') AS job_id \gset -\set ON_ERROR_STOP 0 -select alter_job(:job_id,config:='{"end_offset": null, "start_offset": null, "mat_hypertable_id": 1000}'); -ERROR: configuration materialization hypertable id 1000 not found ---test creating continuous aggregate with compression enabled -- -CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.compress, timescaledb.finalized = false) -AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; -ERROR: cannot enable compression while creating a continuous aggregate ---this one succeeds -CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS SELECT time_bucket('1h',time) as bucket, avg(7) FROM i2980 GROUP BY 1; -NOTICE: continuous aggregate "i2980_cagg2" is already up-to-date ---now enable compression with invalid parameters -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, -timescaledb.compress_segmentby = 'bucket'); -NOTICE: defaulting compress_orderby to bucket -ERROR: cannot use column "bucket" for both ordering and segmenting -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, -timescaledb.compress_orderby = 'bucket'); ---enable compression and test re-enabling compression -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress); -NOTICE: defaulting compress_orderby to bucket -insert into i2980 select now(); -call refresh_continuous_aggregate('i2980_cagg2', NULL, NULL); -SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; - compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_15_3_chunk -(1 row) - -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'false'); -ERROR: cannot change configuration on already compressed chunks -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'true'); -NOTICE: defaulting compress_orderby to bucket -ERROR: cannot change configuration on already compressed chunks -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, timescaledb.compress_segmentby = 'bucket'); -NOTICE: defaulting compress_orderby to bucket -ERROR: cannot change configuration on 
already compressed chunks ---Errors with compression policy on caggs-- -select add_continuous_aggregate_policy('i2980_cagg2', interval '10 day', interval '2 day' ,'4h') AS job_id ; - job_id --------- - 1002 -(1 row) - -SELECT add_compression_policy('i2980_cagg', '8 day'::interval); -ERROR: compression not enabled on continuous aggregate "i2980_cagg" -ALTER MATERIALIZED VIEW i2980_cagg SET ( timescaledb.compress ); -NOTICE: defaulting compress_orderby to time_bucket -SELECT add_compression_policy('i2980_cagg', '8 day'::interval); -ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg -SELECT add_continuous_aggregate_policy('i2980_cagg2', '10 day'::interval, '6 day'::interval); -ERROR: function add_continuous_aggregate_policy(unknown, interval, interval) does not exist at character 8 -SELECT add_compression_policy('i2980_cagg2', '3 day'::interval); -ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg2 -SELECT add_compression_policy('i2980_cagg2', '1 day'::interval); -ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg2 -SELECT add_compression_policy('i2980_cagg2', '3'::integer); -ERROR: unsupported compress_after argument type, expected type : interval -SELECT add_compression_policy('i2980_cagg2', 13::integer); -ERROR: unsupported compress_after argument type, expected type : interval -SELECT materialization_hypertable_schema || '.' || materialization_hypertable_name AS "MAT_TABLE_NAME" -FROM timescaledb_information.continuous_aggregates -WHERE view_name = 'i2980_cagg2' -\gset -SELECT add_compression_policy( :'MAT_TABLE_NAME', 13::integer); -ERROR: cannot add compression policy to materialized hypertable "_materialized_hypertable_15" ---TEST compressing cagg chunks without enabling compression -SELECT count(*) FROM (select decompress_chunk(ch) FROM show_chunks('i2980_cagg2') ch ) q; - count -------- - 1 -(1 row) - -ALTER MATERIALIZED VIEW i2980_cagg2 SET (timescaledb.compress = 'false'); -SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; -ERROR: compression not enabled on "i2980_cagg2" --- test error handling when trying to create cagg on internal hypertable -CREATE TABLE comp_ht_test(time timestamptz NOT NULL); -SELECT table_name FROM create_hypertable('comp_ht_test','time'); - table_name --------------- - comp_ht_test -(1 row) - -ALTER TABLE comp_ht_test SET (timescaledb.compress); -SELECT - format('%I.%I', ht.schema_name, ht.table_name) AS "INTERNALTABLE" -FROM - _timescaledb_catalog.hypertable ht - INNER JOIN _timescaledb_catalog.hypertable uncompress ON (ht.id = uncompress.compressed_hypertable_id - AND uncompress.table_name = 'comp_ht_test') \gset -CREATE MATERIALIZED VIEW cagg1 WITH(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',_ts_meta_min_1) FROM :INTERNALTABLE GROUP BY 1; -ERROR: hypertable is an internal compressed hypertable diff --git a/tsl/test/expected/cagg_errors_deprecated-16.out b/tsl/test/expected/cagg_errors_deprecated-16.out deleted file mode 100644 index d48635538a4..00000000000 --- a/tsl/test/expected/cagg_errors_deprecated-16.out +++ /dev/null @@ -1,712 +0,0 @@ --- This file and its contents are licensed under the Timescale License. 
--- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. -\set ON_ERROR_STOP 0 -\set VERBOSITY default ---negative tests for query validation -create table mat_t1( a integer, b integer,c TEXT); -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature integer NULL, - humidity DOUBLE PRECISION NULL, - timemeasure TIMESTAMPTZ, - timeinterval INTERVAL - ); -select table_name from create_hypertable( 'conditions', 'timec'); - table_name ------------- - conditions -(1 row) - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, timescaledb.myfill = 1) -as -select location , min(temperature) -from conditions -group by time_bucket('1d', timec), location WITH NO DATA; -ERROR: unrecognized parameter "timescaledb.myfill" ---valid PG option -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, check_option = LOCAL ) -as -select * from conditions , mat_t1 WITH NO DATA; -ERROR: unsupported combination of storage parameters -DETAIL: A continuous aggregate does not support standard storage parameters. -HINT: Use only parameters with the "timescaledb." prefix when creating a continuous aggregate. ---non-hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select a, count(*) from mat_t1 -group by a WITH NO DATA; -ERROR: table "mat_t1" is not a hypertable --- no group by -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select count(*) from conditions WITH NO DATA; -ERROR: invalid continuous aggregate query -HINT: Include at least one aggregate function and a GROUP BY clause with time bucket. --- no time_bucket in group by -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select count(*) from conditions group by location WITH NO DATA; -ERROR: continuous aggregate view must include a valid time bucket function --- with valid query in a CTE -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -with m1 as ( -Select location, count(*) from conditions - group by time_bucket('1week', timec) , location) -select * from m1 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: CTEs, subqueries and set-returning functions are not supported by continuous aggregates. ---with DISTINCT ON -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as - select distinct on ( location ) count(*) from conditions group by location, time_bucket('1week', timec) WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: DISTINCT / DISTINCT ON queries are not supported by continuous aggregates. 
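As in the pg15 file above, these scripts rely on psql settings to march through expected failures rather than abort on the first one. The pattern, in brief:

\set ON_ERROR_STOP 0   -- keep executing after an expected ERROR
\set VERBOSITY default -- capture DETAIL and HINT lines in the output
-- ...statements that are supposed to fail...
\set ON_ERROR_STOP 1   -- restore strict mode: any later error aborts the script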
---aggregate with DISTINCT -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), - count(location) , sum(distinct temperature) from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported ---aggregate with FILTER -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), - sum(temperature) filter ( where humidity > 20 ) from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported --- aggregate with filter in having clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), max(temperature) -from conditions - group by time_bucket('1week', timec) , location - having sum(temperature) filter ( where humidity > 20 ) > 50 WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported --- time_bucket on non partitioning column of hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timemeasure) , location WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column ---time_bucket on expression -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timec+ '10 minutes'::interval) , location WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column ---multiple time_bucket functions -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timec) , time_bucket('1month', timec), location WITH NO DATA; -ERROR: continuous aggregate view cannot contain multiple time bucket functions ---time_bucket using additional args -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket( INTERVAL '5 minutes', timec, INTERVAL '-2.5 minutes') , location WITH NO DATA; -ERROR: continuous aggregate view must include a valid time bucket function ---time_bucket using non-const for first argument -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket( timeinterval, timec) , location WITH NO DATA; -ERROR: only immutable expressions allowed in time bucket function -HINT: Use an immutable expression as first argument to the time bucket function. 
--- ordered set aggregate -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select mode() within group( order by humidity) -from conditions - group by time_bucket('1week', timec) WITH NO DATA; -ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported ---window function -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select avg(temperature) over( order by humidity) -from conditions - WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: Window functions are not supported by continuous aggregates. ---aggregate without combine function -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select json_agg(location) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates which are not parallelizable are not supported -; --- Starting on PG16 this test will pass because array_agg is parallel safe --- https://github.com/postgres/postgres/commit/16fd03e956540d1b47b743f6a84f37c54ac93dd4 -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature), array_agg(location) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -; --- user-defined aggregate without combine function -CREATE AGGREGATE newavg ( - sfunc = int4_avg_accum, basetype = int4, stype = _int8, - finalfunc = int8_avg, - initcond1 = '{0,0}' -); -DROP MATERIALIZED VIEW IF EXISTS mat_m1; -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), newavg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: aggregates which are not parallelizable are not supported -; --- using subqueries -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from -( select humidity, temperature, location, timec -from conditions ) q - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: invalid continuous aggregate view -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -select * from -( Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location ) q WITH NO DATA; -ERROR: invalid continuous aggregate query -HINT: Include at least one aggregate function and a GROUP BY clause with time bucket. ---using limit /limit offset -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -limit 10 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. -HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead.
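Note the divergence from the pg15 variant earlier in this block: here the array_agg view was created without error, because PostgreSQL 16 gave array_agg a combine function, which is exactly what the "not parallelizable" check looks for. One way to confirm from the catalogs (a sketch):

SELECT aggfnoid::regprocedure, aggcombinefn
FROM pg_aggregate
WHERE aggfnoid = 'array_agg(anynonarray)'::regprocedure;
-- aggcombinefn is array_agg_combine on PG16 and '-' on earlier versions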
-CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -offset 10 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. -HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. ---using ORDER BY in view definition -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -ORDER BY 1 WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: ORDER BY is not supported in queries defining continuous aggregates. -HINT: Use ORDER BY clauses in SELECTS from the continuous aggregate view instead. ---using FETCH -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -fetch first 10 rows only WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. -HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. ---using locking clauses FOR clause ---all should be disabled. we cannot guarantee locks on the hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR KEY SHARE WITH NO DATA; -ERROR: FOR KEY SHARE is not allowed with GROUP BY clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR SHARE WITH NO DATA; -ERROR: FOR SHARE is not allowed with GROUP BY clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR UPDATE WITH NO DATA; -ERROR: FOR UPDATE is not allowed with GROUP BY clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR NO KEY UPDATE WITH NO DATA; -ERROR: FOR NO KEY UPDATE is not allowed with GROUP BY clause ---tablesample clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions tablesample bernoulli(0.2) - group by time_bucket('1week', timec) , location - WITH NO DATA; -ERROR: invalid continuous aggregate view --- ONLY in from clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from ONLY
conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: invalid continuous aggregate view ---grouping sets and variants -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by grouping sets(time_bucket('1week', timec) , location ) WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: GROUP BY GROUPING SETS, ROLLUP and CUBE are not supported by continuous aggregates -HINT: Define multiple continuous aggregates with different grouping levels. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions -group by rollup(time_bucket('1week', timec) , location ) WITH NO DATA; -ERROR: invalid continuous aggregate query -DETAIL: GROUP BY GROUPING SETS, ROLLUP and CUBE are not supported by continuous aggregates -HINT: Define multiple continuous aggregates with different grouping levels. ---NO immutable functions -- check all clauses -CREATE FUNCTION test_stablefunc(int) RETURNS int LANGUAGE 'sql' - STABLE AS 'SELECT $1 + 10'; -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), max(timec + INTERVAL '1h') -from conditions -group by time_bucket('1week', timec) , location WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), min(location) -from conditions -group by time_bucket('1week', timec) -having max(timec + INTERVAL '1h') > '2010-01-01 09:00:00-08' WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( test_stablefunc(humidity::int) ), min(location) -from conditions -group by time_bucket('1week', timec) WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( temperature ), min(location) -from conditions -group by time_bucket('1week', timec), test_stablefunc(humidity::int) WITH NO DATA; -ERROR: only immutable functions supported in continuous aggregate view -HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. 
--- Should use CREATE MATERIALIZED VIEW to create continuous aggregates -CREATE VIEW continuous_aggs_errors_tbl1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS -SELECT time_bucket('1 week', timec) - FROM conditions -GROUP BY time_bucket('1 week', timec); -ERROR: cannot create continuous aggregate with CREATE VIEW -HINT: Use CREATE MATERIALIZED VIEW to create a continuous aggregate. --- row security on table -create table rowsec_tab( a bigint, b integer, c integer); -select table_name from create_hypertable( 'rowsec_tab', 'a', chunk_time_interval=>10); -NOTICE: adding not-null constraint to column "a" -DETAIL: Dimensions cannot have NULL values. - table_name ------------- - rowsec_tab -(1 row) - -CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a), 0)::bigint FROM rowsec_tab $$; -SELECT set_integer_now_func('rowsec_tab', 'integer_now_test'); - set_integer_now_func ----------------------- - -(1 row) - -alter table rowsec_tab ENABLE ROW LEVEL SECURITY; -create policy rowsec_tab_allview ON rowsec_tab FOR SELECT USING(true); -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( b), min(c) -from rowsec_tab -group by time_bucket('1', a) WITH NO DATA; -ERROR: cannot create continuous aggregate on hypertable with row security --- cagg on cagg not allowed -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -SELECT time_bucket('1 day', timec) AS bucket - FROM conditions -GROUP BY time_bucket('1 day', timec); -NOTICE: continuous aggregate "mat_m1" is already up-to-date -CREATE MATERIALIZED VIEW mat_m2_on_mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) -AS -SELECT time_bucket('1 week', bucket) AS bucket - FROM mat_m1 -GROUP BY time_bucket('1 week', bucket); -ERROR: old format of continuous aggregate is not supported -HINT: Run "CALL cagg_migrate('public.mat_m1');" to migrate to the new format. 
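The old format rejected here is tracked per aggregate in the catalog through the same finalized flag these tests set at creation time. A quick inventory of aggregates still needing migration (a sketch against the catalog table used throughout this suite):

SELECT user_view_schema, user_view_name, finalized
FROM _timescaledb_catalog.continuous_agg
WHERE NOT finalized;  -- aggregates still on the old format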
-drop table conditions cascade; -NOTICE: drop cascades to 3 other objects -DETAIL: drop cascades to view mat_m1 -drop cascades to view _timescaledb_internal._partial_view_4 -drop cascades to view _timescaledb_internal._direct_view_4 ---negative tests for WITH options -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); -select table_name from create_hypertable( 'conditions', 'timec'); - table_name ------------- - conditions -(1 row) - -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket('1day', timec) WITH NO DATA; -SELECT h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_with_test' -\gset -\set ON_ERROR_STOP 0 -ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'false'); -ERROR: cannot alter create_group_indexes option for continuous aggregates -ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'true'); -ERROR: cannot alter create_group_indexes option for continuous aggregates -ALTER MATERIALIZED VIEW mat_with_test ALTER timec DROP default; -ERROR: cannot alter only SET options of a continuous aggregate -\set ON_ERROR_STOP 1 -\set VERBOSITY terse -DROP TABLE conditions CASCADE; -NOTICE: drop cascades to 3 other objects ---test WITH using a hypertable with an integer time dimension -CREATE TABLE conditions ( - timec SMALLINT NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); -select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); - table_name ------------- - conditions -(1 row) - -CREATE OR REPLACE FUNCTION integer_now_test_s() returns smallint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::smallint FROM conditions $$; -SELECT set_integer_now_func('conditions', 'integer_now_test_s'); - set_integer_now_func ----------------------- - -(1 row) - -\set ON_ERROR_STOP 0 -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; -ERROR: time bucket function must reference a hypertable dimension column -ALTER TABLE conditions ALTER timec type int; -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH 
(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; -\set ON_ERROR_STOP 1 -DROP TABLE conditions cascade; -NOTICE: drop cascades to 3 other objects -CREATE TABLE conditions ( - timec BIGINT NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); -select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); - table_name ------------- - conditions -(1 row) - -CREATE OR REPLACE FUNCTION integer_now_test_b() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::bigint FROM conditions $$; -SELECT set_integer_now_func('conditions', 'integer_now_test_b'); - set_integer_now_func ----------------------- - -(1 row) - -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(BIGINT '100', timec), min(location), sum(temperature),sum(humidity) -from conditions -group by 1 WITH NO DATA; --- custom time partition functions are not supported with invalidations -CREATE FUNCTION text_part_func(TEXT) RETURNS BIGINT - AS $$ SELECT length($1)::BIGINT $$ - LANGUAGE SQL IMMUTABLE; -CREATE TABLE text_time(time TEXT); - SELECT create_hypertable('text_time', 'time', chunk_time_interval => 10, time_partitioning_func => 'text_part_func'); -NOTICE: adding not-null constraint to column "time" - create_hypertable -------------------------- - (11,public,text_time,t) -(1 row) - -\set VERBOSITY default -\set ON_ERROR_STOP 0 -CREATE MATERIALIZED VIEW text_view - WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) - AS SELECT time_bucket('5', text_part_func(time)), COUNT(time) - FROM text_time - GROUP BY 1 WITH NO DATA; -ERROR: custom partitioning functions not supported with continuous aggregates -\set ON_ERROR_STOP 1 --- Check that we get an error when mixing normal materialized views --- and continuous aggregates. -CREATE MATERIALIZED VIEW normal_mat_view AS -SELECT time_bucket('5', text_part_func(time)), COUNT(time) - FROM text_time -GROUP BY 1 WITH NO DATA; -\set VERBOSITY terse -\set ON_ERROR_STOP 0 -DROP MATERIALIZED VIEW normal_mat_view, mat_with_test; -ERROR: mixing continuous aggregates and other objects not allowed -\set ON_ERROR_STOP 1 -DROP TABLE text_time CASCADE; -NOTICE: drop cascades to materialized view normal_mat_view -CREATE TABLE measurements (time TIMESTAMPTZ NOT NULL, device INT, value FLOAT); -SELECT create_hypertable('measurements', 'time'); - create_hypertable ----------------------------- - (12,public,measurements,t) -(1 row) - -INSERT INTO measurements VALUES ('2019-03-04 13:30', 1, 1.3); --- Add a continuous aggregate on the measurements table and a policy --- to be able to test error cases for the add_job function. 
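The add_job error cases that follow all revolve around the JSONB config carried by a refresh-policy job. For contrast, a minimal sketch of a call the check function should accept, assuming the materialization hypertable id 13 that the job created below ends up with:

SELECT add_job(
    '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc,
    '1 hour'::interval,
    check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc,
    -- all three keys present; the offsets must be null or parse as intervals
    config => '{"end_offset": null, "start_offset": null, "mat_hypertable_id": 13}');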
-CREATE MATERIALIZED VIEW measurements_summary WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS -SELECT time_bucket('1 day', time), COUNT(time) - FROM measurements -GROUP BY 1 WITH NO DATA; -SELECT ca.mat_hypertable_id AS "MAT_HYPERTABLE_ID" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'measurements_summary' -\gset --- First test that add_job checks the config. It is currently possible --- to add non-custom jobs using the add_job function so we need to --- test that the function actually checks the config parameters. These --- should all generate errors, for different reasons... -\set ON_ERROR_STOP 0 --- ... this one because it is missing a field. -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => '{"end_offset": null, "start_offset": null}'); -ERROR: could not find "mat_hypertable_id" in config for job --- ... this one because it has a bad value for start_offset -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => ('{"end_offset": null, "start_offset": "1 fortnight", "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); -ERROR: invalid input syntax for type interval: "1 fortnight" --- ... this one because it has a bad value for end_offset -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => ('{"end_offset": "chicken", "start_offset": null, "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); -ERROR: invalid input syntax for type interval: "chicken" -\set ON_ERROR_STOP 1 -SELECT add_continuous_aggregate_policy('measurements_summary', NULL, NULL, '1 h'::interval) AS job_id -\gset -\x on -SELECT * FROM _timescaledb_config.bgw_job WHERE id = :job_id; --[ RECORD 1 ]-----+-------------------------------------------------------------------- -id | 1000 -application_name | Refresh Continuous Aggregate Policy [1000] -schedule_interval | @ 1 hour -max_runtime | @ 0 -max_retries | -1 -retry_period | @ 1 hour -proc_schema | _timescaledb_functions -proc_name | policy_refresh_continuous_aggregate -owner | default_perm_user -scheduled | t -fixed_schedule | f -initial_start | -hypertable_id | 13 -config | {"end_offset": null, "start_offset": null, "mat_hypertable_id": 13} -check_schema | _timescaledb_functions -check_name | policy_refresh_continuous_aggregate_check -timezone | - -\x off --- These are all weird values for the parameters for the continuous --- aggregate jobs and should generate an error. Since the config will --- be replaced, we will also generate error for missing arguments. 
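For contrast with the failing calls below, a sketch of an alter_job invocation whose replacement config stays complete and parseable, reusing the :job_id and :MAT_HYPERTABLE_ID psql variables captured above (the offset values are illustrative):

SELECT alter_job(:job_id,
    -- keep mat_hypertable_id in the replacement config and use valid intervals
    config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "1 day", "start_offset": "1 week"}')::jsonb);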
-\set ON_ERROR_STOP 0 -SELECT alter_job(:job_id, config => '{"end_offset": "1 week", "start_offset": "2 fortnights"}'); -ERROR: could not find "mat_hypertable_id" in config for job -SELECT alter_job(:job_id, - config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 fortnights"}')::jsonb); -ERROR: invalid input syntax for type interval: "1 fortnights" -SELECT alter_job(:job_id, - config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 week"}')::jsonb); -ERROR: invalid input syntax for type interval: "chicken" -\set ON_ERROR_STOP 1 -DROP TABLE measurements CASCADE; -NOTICE: drop cascades to 3 other objects -DROP TABLE conditions CASCADE; -NOTICE: drop cascades to 3 other objects --- test handling of invalid mat_hypertable_id -create table i2980(time timestamptz not null); -select create_hypertable('i2980','time'); - create_hypertable ---------------------- - (14,public,i2980,t) -(1 row) - -create materialized view i2980_cagg with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; -NOTICE: continuous aggregate "i2980_cagg" is already up-to-date -select add_continuous_aggregate_policy('i2980_cagg',NULL,NULL,'4h') AS job_id \gset -\set ON_ERROR_STOP 0 -select alter_job(:job_id,config:='{"end_offset": null, "start_offset": null, "mat_hypertable_id": 1000}'); -ERROR: configuration materialization hypertable id 1000 not found ---test creating continuous aggregate with compression enabled -- -CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.compress, timescaledb.finalized = false) -AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; -ERROR: cannot enable compression while creating a continuous aggregate ---this one succeeds -CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS SELECT time_bucket('1h',time) as bucket, avg(7) FROM i2980 GROUP BY 1; -NOTICE: continuous aggregate "i2980_cagg2" is already up-to-date ---now enable compression with invalid parameters -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, -timescaledb.compress_segmentby = 'bucket'); -NOTICE: defaulting compress_orderby to bucket -ERROR: cannot use column "bucket" for both ordering and segmenting -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, -timescaledb.compress_orderby = 'bucket'); ---enable compression and test re-enabling compression -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress); -NOTICE: defaulting compress_orderby to bucket -insert into i2980 select now(); -call refresh_continuous_aggregate('i2980_cagg2', NULL, NULL); -SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; - compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_16_3_chunk -(1 row) - -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'false'); -ERROR: cannot change configuration on already compressed chunks -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'true'); -NOTICE: defaulting compress_orderby to bucket -ERROR: cannot change configuration on already compressed chunks -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, timescaledb.compress_segmentby = 'bucket'); -NOTICE: defaulting compress_orderby to bucket -ERROR: cannot change configuration on 
already compressed chunks ---Errors with compression policy on caggs-- -select add_continuous_aggregate_policy('i2980_cagg2', interval '10 day', interval '2 day' ,'4h') AS job_id ; - job_id --------- - 1002 -(1 row) - -SELECT add_compression_policy('i2980_cagg', '8 day'::interval); -ERROR: compression not enabled on continuous aggregate "i2980_cagg" -ALTER MATERIALIZED VIEW i2980_cagg SET ( timescaledb.compress ); -NOTICE: defaulting compress_orderby to time_bucket -SELECT add_compression_policy('i2980_cagg', '8 day'::interval); -ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg -SELECT add_continuous_aggregate_policy('i2980_cagg2', '10 day'::interval, '6 day'::interval); -ERROR: function add_continuous_aggregate_policy(unknown, interval, interval) does not exist at character 8 -SELECT add_compression_policy('i2980_cagg2', '3 day'::interval); -ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg2 -SELECT add_compression_policy('i2980_cagg2', '1 day'::interval); -ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg2 -SELECT add_compression_policy('i2980_cagg2', '3'::integer); -ERROR: unsupported compress_after argument type, expected type : interval -SELECT add_compression_policy('i2980_cagg2', 13::integer); -ERROR: unsupported compress_after argument type, expected type : interval -SELECT materialization_hypertable_schema || '.' || materialization_hypertable_name AS "MAT_TABLE_NAME" -FROM timescaledb_information.continuous_aggregates -WHERE view_name = 'i2980_cagg2' -\gset -SELECT add_compression_policy( :'MAT_TABLE_NAME', 13::integer); -ERROR: cannot add compression policy to materialized hypertable "_materialized_hypertable_16" ---TEST compressing cagg chunks without enabling compression -SELECT count(*) FROM (select decompress_chunk(ch) FROM show_chunks('i2980_cagg2') ch ) q; - count -------- - 1 -(1 row) - -ALTER MATERIALIZED VIEW i2980_cagg2 SET (timescaledb.compress = 'false'); -SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; -ERROR: compression not enabled on "i2980_cagg2" --- test error handling when trying to create cagg on internal hypertable -CREATE TABLE comp_ht_test(time timestamptz NOT NULL); -SELECT table_name FROM create_hypertable('comp_ht_test','time'); - table_name --------------- - comp_ht_test -(1 row) - -ALTER TABLE comp_ht_test SET (timescaledb.compress); -SELECT - format('%I.%I', ht.schema_name, ht.table_name) AS "INTERNALTABLE" -FROM - _timescaledb_catalog.hypertable ht - INNER JOIN _timescaledb_catalog.hypertable uncompress ON (ht.id = uncompress.compressed_hypertable_id - AND uncompress.table_name = 'comp_ht_test') \gset -CREATE MATERIALIZED VIEW cagg1 WITH(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',_ts_meta_min_1) FROM :INTERNALTABLE GROUP BY 1; -ERROR: hypertable is an internal compressed hypertable diff --git a/tsl/test/expected/cagg_joins.out b/tsl/test/expected/cagg_joins.out index c142d8d101f..d40dad84b7b 100644 --- a/tsl/test/expected/cagg_joins.out +++ b/tsl/test/expected/cagg_joins.out @@ -937,42 +937,7 @@ GROUP BY name, bucket; ERROR: invalid continuous aggregate view DETAIL: Unsupported expression in join clause. 
HINT: Only equality conditions are supported in continuous aggregates. ---With old format cagg definition -CREATE MATERIALIZED VIEW cagg_cagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE, timescaledb.finalized = FALSE) AS -SELECT time_bucket(INTERVAL '1 day', day) AS bucket, - AVG(temperature), - MAX(temperature), - MIN(temperature), - devices.device_id device_id, - name -FROM conditions, devices -WHERE conditions.device_id = devices.device_id -GROUP BY name, bucket, devices.device_id; -ERROR: old format of continuous aggregate is not supported with joins -HINT: Set timescaledb.finalized to TRUE. -CREATE MATERIALIZED VIEW cagg_cagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE, timescaledb.finalized = FALSE) AS -SELECT time_bucket(INTERVAL '1 day', day) AS bucket, - AVG(temperature), - MAX(temperature), - MIN(temperature), - devices.device_id device_id, - name -FROM conditions JOIN devices -ON conditions.device_id = devices.device_id -GROUP BY name, bucket, devices.device_id; -ERROR: old format of continuous aggregate is not supported with joins -HINT: Set timescaledb.finalized to TRUE. CREATE TABLE mat_t1( a integer, b integer,c TEXT); ---With LATERAL multiple tables old format -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only = FALSE, timescaledb.finalized = false) -as -select temperature, count(*) from conditions, -LATERAL (Select * from mat_t1 where a = conditions.temperature) q -group by temperature WITH NO DATA; -ERROR: old format of continuous aggregate is not supported with joins -HINT: Set timescaledb.finalized to TRUE. --With LATERAL multiple tables in new format CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only = FALSE) as diff --git a/tsl/test/expected/cagg_migrate.out b/tsl/test/expected/cagg_migrate.out index dfd9f214262..4065821b773 100644 --- a/tsl/test/expected/cagg_migrate.out +++ b/tsl/test/expected/cagg_migrate.out @@ -1,45 +1,201 @@ -- This file and its contents are licensed under the Timescale License. -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. -\set IS_DISTRIBUTED FALSE -\set IS_TIME_DIMENSION FALSE +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -- ######################################################## -- ## INTEGER data type tests -- ######################################################## +\set IS_TIME_DIMENSION FALSE \set TIME_DIMENSION_DATATYPE INTEGER \ir include/cagg_migrate_common.sql -- This file and its contents are licensed under the Timescale License. -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -\if :IS_DISTRIBUTED -\echo 'Running distributed hypertable tests' -\else -\echo 'Running local hypertable tests' -Running local hypertable tests -\endif -CREATE TABLE conditions ( - "time" :TIME_DIMENSION_DATATYPE NOT NULL, - temperature NUMERIC +-- Setup some variables +SELECT + format('\! 
zcat include/data/cagg_migrate_%1$s.sql.gz > %2$s/results/cagg_migrate_%1$s.sql', lower(:'TIME_DIMENSION_DATATYPE'), :'TEST_OUTPUT_DIR') AS "ZCAT_CMD", + format('%2$s/results/cagg_migrate_%1$s.sql', lower(:'TIME_DIMENSION_DATATYPE'), :'TEST_OUTPUT_DIR') AS "TEST_SCHEMA_FILE" +\gset +-- decompress dump file +:ZCAT_CMD +-- restore dump +SELECT timescaledb_pre_restore(); + timescaledb_pre_restore +------------------------- + t +(1 row) + +\ir :TEST_SCHEMA_FILE +CREATE TABLE public.conditions ( + "time" integer NOT NULL, + temperature numeric ); -\if :IS_DISTRIBUTED - \if :IS_TIME_DIMENSION - SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); - \else - SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); - \endif -\else - \if :IS_TIME_DIMENSION - SELECT table_name FROM create_hypertable('conditions', 'time'); - \else - SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10); - table_name ------------- - conditions +CREATE VIEW _timescaledb_internal._direct_view_2 AS + SELECT public.time_bucket(24, "time") AS bucket, + min(temperature) AS min, + max(temperature) AS max, + avg(temperature) AS avg, + sum(temperature) AS sum + FROM public.conditions + GROUP BY (public.time_bucket(24, "time")); +CREATE VIEW _timescaledb_internal._direct_view_3 AS + SELECT public.time_bucket(24, "time") AS bucket, + min(temperature) AS min, + max(temperature) AS max, + avg(temperature) AS avg, + sum(temperature) AS sum + FROM public.conditions + GROUP BY (public.time_bucket(24, "time")); +CREATE VIEW _timescaledb_internal._direct_view_4 AS + SELECT public.time_bucket(168, "time") AS bucket, + min(temperature) AS min, + max(temperature) AS max, + avg(temperature) AS avg, + sum(temperature) AS sum + FROM public.conditions + GROUP BY (public.time_bucket(168, "time")); +CREATE TABLE _timescaledb_internal._materialized_hypertable_2 ( + bucket integer NOT NULL, + min numeric, + max numeric, + avg numeric, + sum numeric +); +CREATE TABLE _timescaledb_internal._materialized_hypertable_3 ( + bucket integer NOT NULL, + agg_2_2 bytea, + agg_3_3 bytea, + agg_4_4 bytea, + agg_5_5 bytea, + chunk_id integer +); +CREATE TABLE _timescaledb_internal._materialized_hypertable_4 ( + bucket integer NOT NULL, + agg_2_2 bytea, + agg_3_3 bytea, + agg_4_4 bytea, + agg_5_5 bytea, + chunk_id integer +); +CREATE VIEW _timescaledb_internal._partial_view_2 AS + SELECT public.time_bucket(24, "time") AS bucket, + min(temperature) AS min, + max(temperature) AS max, + avg(temperature) AS avg, + sum(temperature) AS sum + FROM public.conditions + GROUP BY (public.time_bucket(24, "time")); +CREATE VIEW _timescaledb_internal._partial_view_3 AS + SELECT public.time_bucket(24, "time") AS bucket, + _timescaledb_functions.partialize_agg(min(temperature)) AS agg_2_2, + _timescaledb_functions.partialize_agg(max(temperature)) AS agg_3_3, + _timescaledb_functions.partialize_agg(avg(temperature)) AS agg_4_4, + _timescaledb_functions.partialize_agg(sum(temperature)) AS agg_5_5, + _timescaledb_functions.chunk_id_from_relid(tableoid) AS chunk_id + FROM public.conditions + GROUP BY (public.time_bucket(24, "time")), (_timescaledb_functions.chunk_id_from_relid(tableoid)); +CREATE VIEW _timescaledb_internal._partial_view_4 AS + SELECT public.time_bucket(168, "time") AS bucket, + _timescaledb_functions.partialize_agg(min(temperature)) AS agg_2_2, + _timescaledb_functions.partialize_agg(max(temperature)) AS agg_3_3, 
+ _timescaledb_functions.partialize_agg(avg(temperature)) AS agg_4_4, + _timescaledb_functions.partialize_agg(sum(temperature)) AS agg_5_5, + _timescaledb_functions.chunk_id_from_relid(tableoid) AS chunk_id + FROM public.conditions + GROUP BY (public.time_bucket(168, "time")), (_timescaledb_functions.chunk_id_from_relid(tableoid)); +CREATE VIEW public.conditions_summary_daily AS + SELECT _materialized_hypertable_3.bucket, + _timescaledb_functions.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_2_2, NULL::numeric) AS min, + _timescaledb_functions.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_3_3, NULL::numeric) AS max, + _timescaledb_functions.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_4_4, NULL::numeric) AS avg, + _timescaledb_functions.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_5_5, NULL::numeric) AS sum + FROM _timescaledb_internal._materialized_hypertable_3 + WHERE (_materialized_hypertable_3.bucket < COALESCE((_timescaledb_functions.cagg_watermark(3))::integer, '-2147483648'::integer)) + GROUP BY _materialized_hypertable_3.bucket +UNION ALL + SELECT public.time_bucket(24, conditions."time") AS bucket, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + avg(conditions.temperature) AS avg, + sum(conditions.temperature) AS sum + FROM public.conditions + WHERE (conditions."time" >= COALESCE((_timescaledb_functions.cagg_watermark(3))::integer, '-2147483648'::integer)) + GROUP BY (public.time_bucket(24, conditions."time")); +CREATE VIEW public.conditions_summary_daily_new AS + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.avg, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 + WHERE (_materialized_hypertable_2.bucket < COALESCE((_timescaledb_functions.cagg_watermark(2))::integer, '-2147483648'::integer)) +UNION ALL + SELECT public.time_bucket(24, conditions."time") AS bucket, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + avg(conditions.temperature) AS avg, + sum(conditions.temperature) AS sum + FROM public.conditions + WHERE (conditions."time" >= COALESCE((_timescaledb_functions.cagg_watermark(2))::integer, '-2147483648'::integer)) + GROUP BY (public.time_bucket(24, conditions."time")); +CREATE VIEW public.conditions_summary_weekly AS + SELECT _materialized_hypertable_4.bucket, + _timescaledb_functions.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_4.agg_2_2, NULL::numeric) AS min, + _timescaledb_functions.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_4.agg_3_3, NULL::numeric) AS max, + _timescaledb_functions.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_4.agg_4_4, NULL::numeric) AS avg, + _timescaledb_functions.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_4.agg_5_5, NULL::numeric) AS sum + FROM 
_timescaledb_internal._materialized_hypertable_4 + WHERE (_materialized_hypertable_4.bucket < COALESCE((_timescaledb_functions.cagg_watermark(4))::integer, '-2147483648'::integer)) + GROUP BY _materialized_hypertable_4.bucket +UNION ALL + SELECT public.time_bucket(168, conditions."time") AS bucket, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + avg(conditions.temperature) AS avg, + sum(conditions.temperature) AS sum + FROM public.conditions + WHERE (conditions."time" >= COALESCE((_timescaledb_functions.cagg_watermark(4))::integer, '-2147483648'::integer)) + GROUP BY (public.time_bucket(168, conditions."time")); +COPY _timescaledb_catalog.hypertable (id, schema_name, table_name, associated_schema_name, associated_table_prefix, num_dimensions, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size, compression_state, compressed_hypertable_id, replication_factor, status) FROM stdin; +COPY _timescaledb_catalog.dimension (id, hypertable_id, column_name, column_type, aligned, num_slices, partitioning_func_schema, partitioning_func, interval_length, compress_interval_length, integer_now_func_schema, integer_now_func) FROM stdin; +COPY _timescaledb_catalog.continuous_agg (mat_hypertable_id, raw_hypertable_id, parent_mat_hypertable_id, user_view_schema, user_view_name, partial_view_schema, partial_view_name, bucket_width, direct_view_schema, direct_view_name, materialized_only, finalized) FROM stdin; +COPY _timescaledb_catalog.continuous_aggs_invalidation_threshold (hypertable_id, watermark) FROM stdin; +COPY _timescaledb_catalog.continuous_aggs_materialization_invalidation_log (materialization_id, lowest_modified_value, greatest_modified_value) FROM stdin; +COPY _timescaledb_catalog.continuous_aggs_watermark (mat_hypertable_id, watermark) FROM stdin; +SELECT pg_catalog.setval('_timescaledb_catalog.dimension_id_seq', 4, true); + setval +-------- + 4 +(1 row) + +SELECT pg_catalog.setval('_timescaledb_catalog.hypertable_id_seq', 4, true); + setval +-------- + 4 +(1 row) + +CREATE INDEX _materialized_hypertable_2_bucket_idx ON _timescaledb_internal._materialized_hypertable_2 USING btree (bucket DESC); +CREATE INDEX _materialized_hypertable_3_bucket_idx ON _timescaledb_internal._materialized_hypertable_3 USING btree (bucket DESC); +CREATE INDEX _materialized_hypertable_4_bucket_idx ON _timescaledb_internal._materialized_hypertable_4 USING btree (bucket DESC); +CREATE INDEX conditions_time_idx ON public.conditions USING btree ("time" DESC); +CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_2 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker(); +CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_3 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker(); +CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_4 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker(); +CREATE TRIGGER ts_cagg_invalidation_trigger AFTER INSERT OR DELETE OR UPDATE ON public.conditions FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.continuous_agg_invalidation_trigger('1'); +CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON public.conditions FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker(); +SELECT timescaledb_post_restore(); + timescaledb_post_restore +-------------------------- + t +(1 row) + +-- Make sure no scheduled job will be executed during the regression tests +SELECT 
_timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t (1 row) - \endif -\endif \if :IS_TIME_DIMENSION INSERT INTO conditions ("time", temperature) SELECT @@ -52,90 +208,24 @@ CREATE TABLE conditions ( SELECT coalesce(max(time), 0) FROM public.conditions $$; - \if :IS_DISTRIBUTED - SELECT - 'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM public.conditions $$;' AS "STMT" - \gset - CALL distributed_exec (:'STMT'); - \endif - SELECT set_integer_now_func('conditions', 'integer_now'); - set_integer_now_func ----------------------- - -(1 row) - INSERT INTO conditions ("time", temperature) SELECT generate_series(1, 1000, 1), 0.25; \endif --- new cagg format (finalized=true) -CREATE MATERIALIZED VIEW conditions_summary_daily_new -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT -\if :IS_TIME_DIMENSION - time_bucket(INTERVAL '1 day', "time") AS bucket, -\else - time_bucket(INTEGER '24', "time") AS bucket, -\endif - MIN(temperature), - MAX(temperature), - AVG(temperature), - SUM(temperature) -FROM - conditions -GROUP BY - bucket -WITH NO DATA; --- older continuous aggregate to be migrated -CREATE MATERIALIZED VIEW conditions_summary_daily -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT -\if :IS_TIME_DIMENSION - time_bucket(INTERVAL '1 day', "time") AS bucket, -\else - time_bucket(INTEGER '24', "time") AS bucket, -\endif - MIN(temperature), - MAX(temperature), - AVG(temperature), - SUM(temperature) -FROM - conditions -GROUP BY - bucket; -psql:include/cagg_migrate_common.sql:95: NOTICE: refreshing continuous aggregate "conditions_summary_daily" --- for permission tests -CREATE MATERIALIZED VIEW conditions_summary_weekly -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT -\if :IS_TIME_DIMENSION - time_bucket(INTERVAL '1 week', "time") AS bucket, -\else - time_bucket(INTEGER '168', "time") AS bucket, -\endif - MIN(temperature), - MAX(temperature), - AVG(temperature), - SUM(temperature) -FROM - conditions -GROUP BY - bucket; -psql:include/cagg_migrate_common.sql:113: NOTICE: refreshing continuous aggregate "conditions_summary_weekly" +CALL refresh_continuous_aggregate('conditions_summary_daily', NULL, NULL); +CALL refresh_continuous_aggregate('conditions_summary_weekly', NULL, NULL); \set ON_ERROR_STOP 0 -- should fail because we don't need to migrate finalized caggs CALL cagg_migrate('conditions_summary_daily_new'); -psql:include/cagg_migrate_common.sql:117: ERROR: continuous aggregate "public.conditions_summary_daily_new" does not require any migration -\set ON_ERROR_STOP 1 -\set ON_ERROR_STOP 0 +psql:include/cagg_migrate_common.sql:46: ERROR: continuous aggregate "public.conditions_summary_daily_new" does not require any migration -- should fail relation does not exist CALL cagg_migrate('conditions_summary_not_cagg'); -psql:include/cagg_migrate_common.sql:122: ERROR: relation "conditions_summary_not_cagg" does not exist at character 19 +psql:include/cagg_migrate_common.sql:49: ERROR: relation "conditions_summary_not_cagg" does not exist at character 19 CREATE TABLE conditions_summary_not_cagg(); -- should fail continuous agg does not exist CALL cagg_migrate('conditions_summary_not_cagg'); -psql:include/cagg_migrate_common.sql:125: ERROR: continuous aggregate "public.conditions_summary_not_cagg" does not exist 
+psql:include/cagg_migrate_common.sql:54: ERROR: continuous aggregate "public.conditions_summary_not_cagg" does not exist \set ON_ERROR_STOP 1 DROP TABLE conditions_summary_not_cagg; SELECT @@ -155,7 +245,7 @@ WHERE \set ON_ERROR_STOP 0 -- should fail because the new cagg with suffix '_new' already exists CALL cagg_migrate('conditions_summary_daily'); -psql:include/cagg_migrate_common.sql:147: ERROR: continuous aggregate "public.conditions_summary_daily_new" already exists +psql:include/cagg_migrate_common.sql:76: ERROR: continuous aggregate "public.conditions_summary_daily_new" already exists \set ON_ERROR_STOP 1 -- remove the new cagg to execute the migration DROP MATERIALIZED VIEW conditions_summary_daily_new; @@ -216,8 +306,8 @@ SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalo -- should resume the execution CALL cagg_migrate('conditions_summary_daily'); -psql:include/cagg_migrate_common.sql:169: WARNING: resuming the migration of the continuous aggregate "public.conditions_summary_daily" -psql:include/cagg_migrate_common.sql:169: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('1008' AS integer), NULL);" +psql:include/cagg_migrate_common.sql:98: WARNING: resuming the migration of the continuous aggregate "public.conditions_summary_daily" +psql:include/cagg_migrate_common.sql:98: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('1008' AS integer), NULL);" SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id; mat_hypertable_id | step_id | status | type | config -------------------+---------+----------+------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -244,13 +334,13 @@ SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalo \set ON_ERROR_STOP 0 -- should error because plan already exists CALL _timescaledb_functions.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new'); -psql:include/cagg_migrate_common.sql:174: ERROR: plan already exists for materialized hypertable 3 +psql:include/cagg_migrate_common.sql:103: ERROR: plan already exists for materialized hypertable 3 CALL cagg_migrate('conditions_summary_daily'); -psql:include/cagg_migrate_common.sql:175: ERROR: plan already exists for continuous aggregate public.conditions_summary_daily +psql:include/cagg_migrate_common.sql:104: ERROR: plan already exists for continuous aggregate public.conditions_summary_daily \set ON_ERROR_STOP 1 -- policies for test ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true); -psql:include/cagg_migrate_common.sql:179: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:108: NOTICE: defaulting compress_orderby to bucket \if :IS_TIME_DIMENSION SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval); SELECT add_continuous_aggregate_policy('conditions_summary_daily', '30 days'::interval, '1 day'::interval, '1 hour'::interval); @@ -289,12 +379,12 @@ AND job_id >= 1000; -- execute the migration DROP MATERIALIZED VIEW conditions_summary_daily_new; -psql:include/cagg_migrate_common.sql:198: NOTICE: drop cascades to 10 
other objects +psql:include/cagg_migrate_common.sql:127: NOTICE: drop cascades to 10 other objects TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE; -psql:include/cagg_migrate_common.sql:199: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" +psql:include/cagg_migrate_common.sql:128: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" CALL cagg_migrate('conditions_summary_daily'); -psql:include/cagg_migrate_common.sql:200: NOTICE: defaulting compress_orderby to bucket -psql:include/cagg_migrate_common.sql:200: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('1008' AS integer), NULL);" +psql:include/cagg_migrate_common.sql:129: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:129: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('1008' AS integer), NULL);" SELECT ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID", h.schema_name AS "NEW_MAT_SCHEMA_NAME", @@ -425,9 +515,9 @@ JOIN _timescaledb_catalog.continuous_agg ON mat_hypertable_id = hypertable_id ORDER BY bgw_job.id; -- test migration overriding the new cagg and keeping the old DROP MATERIALIZED VIEW conditions_summary_daily_new; -psql:include/cagg_migrate_common.sql:248: NOTICE: drop cascades to 10 other objects +psql:include/cagg_migrate_common.sql:177: NOTICE: drop cascades to 10 other objects TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE; -psql:include/cagg_migrate_common.sql:249: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" +psql:include/cagg_migrate_common.sql:178: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" -- check policies before the migration SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily'; schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone @@ -438,8 +528,8 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d (3 rows) CALL cagg_migrate('conditions_summary_daily', override => TRUE); -psql:include/cagg_migrate_common.sql:252: NOTICE: defaulting compress_orderby to bucket -psql:include/cagg_migrate_common.sql:252: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('1008' AS integer), NULL);" +psql:include/cagg_migrate_common.sql:181: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:181: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('1008' AS integer), NULL);" -- cagg with the new format because it was overriden \d+ conditions_summary_daily View "public.conditions_summary_daily" @@ -500,7 +590,7 @@ UNION ALL \set ON_ERROR_STOP 0 -- should fail because the cagg was overriden SELECT * FROM conditions_summary_daily_new; -psql:include/cagg_migrate_common.sql:259: ERROR: relation "conditions_summary_daily_new" does not exist at character 15 
+psql:include/cagg_migrate_common.sql:188: ERROR: relation "conditions_summary_daily_new" does not exist at character 15 \set ON_ERROR_STOP 1 -- check policies after the migration SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily'; @@ -528,9 +618,9 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d -- test migration overriding the new cagg and removing the old TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE; -psql:include/cagg_migrate_common.sql:269: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" +psql:include/cagg_migrate_common.sql:198: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" DROP MATERIALIZED VIEW conditions_summary_daily; -psql:include/cagg_migrate_common.sql:270: NOTICE: drop cascades to 10 other objects +psql:include/cagg_migrate_common.sql:199: NOTICE: drop cascades to 10 other objects ALTER MATERIALIZED VIEW conditions_summary_daily_old RENAME TO conditions_summary_daily; -- check policies before the migration SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily'; @@ -542,12 +632,12 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d (3 rows) CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE); -psql:include/cagg_migrate_common.sql:274: NOTICE: defaulting compress_orderby to bucket -psql:include/cagg_migrate_common.sql:274: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('1008' AS integer), NULL);" -psql:include/cagg_migrate_common.sql:274: NOTICE: drop cascades to 10 other objects -psql:include/cagg_migrate_common.sql:274: NOTICE: job 1002 not found, skipping -psql:include/cagg_migrate_common.sql:274: NOTICE: job 1001 not found, skipping -psql:include/cagg_migrate_common.sql:274: NOTICE: job 1000 not found, skipping +psql:include/cagg_migrate_common.sql:203: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:203: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('1008' AS integer), NULL);" +psql:include/cagg_migrate_common.sql:203: NOTICE: drop cascades to 10 other objects +psql:include/cagg_migrate_common.sql:203: NOTICE: job 1002 not found, skipping +psql:include/cagg_migrate_common.sql:203: NOTICE: job 1001 not found, skipping +psql:include/cagg_migrate_common.sql:203: NOTICE: job 1000 not found, skipping -- cagg with the new format because it was overriden \d+ conditions_summary_daily View "public.conditions_summary_daily" @@ -579,10 +669,10 @@ UNION ALL \set ON_ERROR_STOP 0 -- should fail because the cagg was overriden SELECT * FROM conditions_summary_daily_new; -psql:include/cagg_migrate_common.sql:279: ERROR: relation "conditions_summary_daily_new" does not exist at character 15 +psql:include/cagg_migrate_common.sql:208: ERROR: relation "conditions_summary_daily_new" does not exist at character 15 -- should fail because the old cagg was removed SELECT * FROM conditions_summary_daily_old; -psql:include/cagg_migrate_common.sql:281: ERROR: relation "conditions_summary_daily_old" does not exist at character 15 +psql:include/cagg_migrate_common.sql:210: ERROR: relation "conditions_summary_daily_old" does not exist at character 15 \set ON_ERROR_STOP 1 -- check 
policies after the migration SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily'; @@ -607,14 +697,14 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d -- permission tests TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE; -psql:include/cagg_migrate_common.sql:291: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" +psql:include/cagg_migrate_common.sql:220: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" GRANT ALL ON TABLE conditions TO :ROLE_DEFAULT_PERM_USER; ALTER MATERIALIZED VIEW conditions_summary_weekly OWNER TO :ROLE_DEFAULT_PERM_USER; SET ROLE :ROLE_DEFAULT_PERM_USER; \set ON_ERROR_STOP 0 -- should fail because the lack of permissions on 'continuous_agg_migrate_plan' catalog table CALL cagg_migrate('conditions_summary_weekly'); -psql:include/cagg_migrate_common.sql:298: ERROR: permission denied for table continuous_agg_migrate_plan +psql:include/cagg_migrate_common.sql:227: ERROR: permission denied for table continuous_agg_migrate_plan \set ON_ERROR_STOP 1 RESET ROLE; GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO :ROLE_DEFAULT_PERM_USER; @@ -622,7 +712,7 @@ SET ROLE :ROLE_DEFAULT_PERM_USER; \set ON_ERROR_STOP 0 -- should fail because the lack of permissions on 'continuous_agg_migrate_plan_step' catalog table CALL cagg_migrate('conditions_summary_weekly'); -psql:include/cagg_migrate_common.sql:308: ERROR: permission denied for table continuous_agg_migrate_plan_step +psql:include/cagg_migrate_common.sql:237: ERROR: permission denied for table continuous_agg_migrate_plan_step \set ON_ERROR_STOP 1 RESET ROLE; GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO :ROLE_DEFAULT_PERM_USER; @@ -630,14 +720,14 @@ SET ROLE :ROLE_DEFAULT_PERM_USER; \set ON_ERROR_STOP 0 -- should fail because the lack of permissions on 'continuous_agg_migrate_plan_step_step_id_seq' catalog sequence CALL cagg_migrate('conditions_summary_weekly'); -psql:include/cagg_migrate_common.sql:318: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq +psql:include/cagg_migrate_common.sql:247: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq \set ON_ERROR_STOP 1 RESET ROLE; GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO :ROLE_DEFAULT_PERM_USER; SET ROLE :ROLE_DEFAULT_PERM_USER; -- all necessary permissions granted CALL cagg_migrate('conditions_summary_weekly'); -psql:include/cagg_migrate_common.sql:327: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_weekly_new', CAST('1008' AS integer), NULL);" +psql:include/cagg_migrate_common.sql:256: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_weekly_new', CAST('1008' AS integer), NULL);" -- check migrated data. should return 0 (zero) rows SELECT * FROM conditions_summary_weekly EXCEPT @@ -676,14 +766,14 @@ RESET ROLE; -- execute transaction control statements. 
Transaction control statements are only -- allowed if CALL is executed in its own transaction.` TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE; -psql:include/cagg_migrate_common.sql:344: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" +psql:include/cagg_migrate_common.sql:273: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" DROP MATERIALIZED VIEW conditions_summary_weekly_new; -psql:include/cagg_migrate_common.sql:345: NOTICE: drop cascades to 6 other objects +psql:include/cagg_migrate_common.sql:274: NOTICE: drop cascades to 6 other objects \set ON_ERROR_STOP 0 BEGIN; -- should fail with `invalid transaction termination` CALL cagg_migrate('conditions_summary_weekly'); -psql:include/cagg_migrate_common.sql:350: ERROR: invalid transaction termination +psql:include/cagg_migrate_common.sql:279: ERROR: invalid transaction termination ROLLBACK; \set ON_ERROR_STOP 1 CREATE FUNCTION execute_migration() RETURNS void AS @@ -699,7 +789,7 @@ LANGUAGE plpgsql; BEGIN; -- should fail with `invalid transaction termination` SELECT execute_migration(); -psql:include/cagg_migrate_common.sql:367: ERROR: invalid transaction termination +psql:include/cagg_migrate_common.sql:296: ERROR: invalid transaction termination ROLLBACK; \set ON_ERROR_STOP 1 -- cleanup @@ -707,12 +797,18 @@ DROP FUNCTION execute_migration(); REVOKE SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan FROM :ROLE_DEFAULT_PERM_USER; REVOKE USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq FROM :ROLE_DEFAULT_PERM_USER; TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE; -psql:include/cagg_migrate_common.sql:375: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" +psql:include/cagg_migrate_common.sql:304: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" DROP MATERIALIZED VIEW conditions_summary_daily; -psql:include/cagg_migrate_common.sql:376: NOTICE: drop cascades to 10 other objects +psql:include/cagg_migrate_common.sql:305: NOTICE: drop cascades to 10 other objects DROP MATERIALIZED VIEW conditions_summary_weekly; -psql:include/cagg_migrate_common.sql:377: NOTICE: drop cascades to 6 other objects +psql:include/cagg_migrate_common.sql:306: NOTICE: drop cascades to 6 other objects DROP TABLE conditions; +SELECT _timescaledb_functions.start_background_workers(); + start_background_workers +-------------------------- + t +(1 row) + -- ######################################################## -- ## TIMESTAMP data type tests -- ######################################################## @@ -722,36 +818,191 @@ DROP TABLE conditions; -- This file and its contents are licensed under the Timescale License. -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -\if :IS_DISTRIBUTED -\echo 'Running distributed hypertable tests' -\else -\echo 'Running local hypertable tests' -Running local hypertable tests -\endif -CREATE TABLE conditions ( - "time" :TIME_DIMENSION_DATATYPE NOT NULL, - temperature NUMERIC +-- Setup some variables +SELECT + format('\! 
zcat include/data/cagg_migrate_%1$s.sql.gz > %2$s/results/cagg_migrate_%1$s.sql', lower(:'TIME_DIMENSION_DATATYPE'), :'TEST_OUTPUT_DIR') AS "ZCAT_CMD", + format('%2$s/results/cagg_migrate_%1$s.sql', lower(:'TIME_DIMENSION_DATATYPE'), :'TEST_OUTPUT_DIR') AS "TEST_SCHEMA_FILE" +\gset +-- decompress dump file +:ZCAT_CMD +-- restore dump +SELECT timescaledb_pre_restore(); + timescaledb_pre_restore +------------------------- + t +(1 row) + +\ir :TEST_SCHEMA_FILE +CREATE TABLE public.conditions ( + "time" timestamp without time zone NOT NULL, + temperature numeric ); -\if :IS_DISTRIBUTED - \if :IS_TIME_DIMENSION - SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); - \else - SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); - \endif -\else - \if :IS_TIME_DIMENSION - SELECT table_name FROM create_hypertable('conditions', 'time'); -psql:include/cagg_migrate_common.sql:26: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices - table_name ------------- - conditions +CREATE VIEW _timescaledb_internal._direct_view_6 AS + SELECT public.time_bucket('1 day'::interval, "time") AS bucket, + min(temperature) AS min, + max(temperature) AS max, + avg(temperature) AS avg, + sum(temperature) AS sum + FROM public.conditions + GROUP BY (public.time_bucket('1 day'::interval, "time")); +CREATE VIEW _timescaledb_internal._direct_view_7 AS + SELECT public.time_bucket('1 day'::interval, "time") AS bucket, + min(temperature) AS min, + max(temperature) AS max, + avg(temperature) AS avg, + sum(temperature) AS sum + FROM public.conditions + GROUP BY (public.time_bucket('1 day'::interval, "time")); +CREATE VIEW _timescaledb_internal._direct_view_8 AS + SELECT public.time_bucket('7 days'::interval, "time") AS bucket, + min(temperature) AS min, + max(temperature) AS max, + avg(temperature) AS avg, + sum(temperature) AS sum + FROM public.conditions + GROUP BY (public.time_bucket('7 days'::interval, "time")); +CREATE TABLE _timescaledb_internal._materialized_hypertable_6 ( + bucket timestamp without time zone NOT NULL, + min numeric, + max numeric, + avg numeric, + sum numeric +); +CREATE TABLE _timescaledb_internal._materialized_hypertable_7 ( + bucket timestamp without time zone NOT NULL, + agg_2_2 bytea, + agg_3_3 bytea, + agg_4_4 bytea, + agg_5_5 bytea, + chunk_id integer +); +CREATE TABLE _timescaledb_internal._materialized_hypertable_8 ( + bucket timestamp without time zone NOT NULL, + agg_2_2 bytea, + agg_3_3 bytea, + agg_4_4 bytea, + agg_5_5 bytea, + chunk_id integer +); +CREATE VIEW _timescaledb_internal._partial_view_6 AS + SELECT public.time_bucket('1 day'::interval, "time") AS bucket, + min(temperature) AS min, + max(temperature) AS max, + avg(temperature) AS avg, + sum(temperature) AS sum + FROM public.conditions + GROUP BY (public.time_bucket('1 day'::interval, "time")); +CREATE VIEW _timescaledb_internal._partial_view_7 AS + SELECT public.time_bucket('1 day'::interval, "time") AS bucket, + _timescaledb_functions.partialize_agg(min(temperature)) AS agg_2_2, + _timescaledb_functions.partialize_agg(max(temperature)) AS agg_3_3, + _timescaledb_functions.partialize_agg(avg(temperature)) AS agg_4_4, + _timescaledb_functions.partialize_agg(sum(temperature)) AS agg_5_5, + _timescaledb_functions.chunk_id_from_relid(tableoid) AS chunk_id + FROM public.conditions + GROUP BY (public.time_bucket('1 day'::interval, "time")), 
(_timescaledb_functions.chunk_id_from_relid(tableoid)); +CREATE VIEW _timescaledb_internal._partial_view_8 AS + SELECT public.time_bucket('7 days'::interval, "time") AS bucket, + _timescaledb_functions.partialize_agg(min(temperature)) AS agg_2_2, + _timescaledb_functions.partialize_agg(max(temperature)) AS agg_3_3, + _timescaledb_functions.partialize_agg(avg(temperature)) AS agg_4_4, + _timescaledb_functions.partialize_agg(sum(temperature)) AS agg_5_5, + _timescaledb_functions.chunk_id_from_relid(tableoid) AS chunk_id + FROM public.conditions + GROUP BY (public.time_bucket('7 days'::interval, "time")), (_timescaledb_functions.chunk_id_from_relid(tableoid)); +CREATE VIEW public.conditions_summary_daily AS + SELECT _materialized_hypertable_7.bucket, + _timescaledb_functions.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_7.agg_2_2, NULL::numeric) AS min, + _timescaledb_functions.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_7.agg_3_3, NULL::numeric) AS max, + _timescaledb_functions.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_7.agg_4_4, NULL::numeric) AS avg, + _timescaledb_functions.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_7.agg_5_5, NULL::numeric) AS sum + FROM _timescaledb_internal._materialized_hypertable_7 + WHERE (_materialized_hypertable_7.bucket < COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(7)), '-infinity'::timestamp without time zone)) + GROUP BY _materialized_hypertable_7.bucket +UNION ALL + SELECT public.time_bucket('1 day'::interval, conditions."time") AS bucket, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + avg(conditions.temperature) AS avg, + sum(conditions.temperature) AS sum + FROM public.conditions + WHERE (conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(7)), '-infinity'::timestamp without time zone)) + GROUP BY (public.time_bucket('1 day'::interval, conditions."time")); +CREATE VIEW public.conditions_summary_daily_new AS + SELECT _materialized_hypertable_6.bucket, + _materialized_hypertable_6.min, + _materialized_hypertable_6.max, + _materialized_hypertable_6.avg, + _materialized_hypertable_6.sum + FROM _timescaledb_internal._materialized_hypertable_6 + WHERE (_materialized_hypertable_6.bucket < COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(6)), '-infinity'::timestamp without time zone)) +UNION ALL + SELECT public.time_bucket('1 day'::interval, conditions."time") AS bucket, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + avg(conditions.temperature) AS avg, + sum(conditions.temperature) AS sum + FROM public.conditions + WHERE (conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(6)), '-infinity'::timestamp without time zone)) + GROUP BY (public.time_bucket('1 day'::interval, conditions."time")); +CREATE VIEW public.conditions_summary_weekly AS + SELECT _materialized_hypertable_8.bucket, + _timescaledb_functions.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], 
_materialized_hypertable_8.agg_2_2, NULL::numeric) AS min, + _timescaledb_functions.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_8.agg_3_3, NULL::numeric) AS max, + _timescaledb_functions.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_8.agg_4_4, NULL::numeric) AS avg, + _timescaledb_functions.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_8.agg_5_5, NULL::numeric) AS sum + FROM _timescaledb_internal._materialized_hypertable_8 + WHERE (_materialized_hypertable_8.bucket < COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(8)), '-infinity'::timestamp without time zone)) + GROUP BY _materialized_hypertable_8.bucket +UNION ALL + SELECT public.time_bucket('7 days'::interval, conditions."time") AS bucket, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + avg(conditions.temperature) AS avg, + sum(conditions.temperature) AS sum + FROM public.conditions + WHERE (conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(8)), '-infinity'::timestamp without time zone)) + GROUP BY (public.time_bucket('7 days'::interval, conditions."time")); +COPY _timescaledb_catalog.hypertable (id, schema_name, table_name, associated_schema_name, associated_table_prefix, num_dimensions, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size, compression_state, compressed_hypertable_id, replication_factor, status) FROM stdin; +COPY _timescaledb_catalog.dimension (id, hypertable_id, column_name, column_type, aligned, num_slices, partitioning_func_schema, partitioning_func, interval_length, compress_interval_length, integer_now_func_schema, integer_now_func) FROM stdin; +COPY _timescaledb_catalog.continuous_agg (mat_hypertable_id, raw_hypertable_id, parent_mat_hypertable_id, user_view_schema, user_view_name, partial_view_schema, partial_view_name, bucket_width, direct_view_schema, direct_view_name, materialized_only, finalized) FROM stdin; +COPY _timescaledb_catalog.continuous_aggs_invalidation_threshold (hypertable_id, watermark) FROM stdin; +COPY _timescaledb_catalog.continuous_aggs_materialization_invalidation_log (materialization_id, lowest_modified_value, greatest_modified_value) FROM stdin; +COPY _timescaledb_catalog.continuous_aggs_watermark (mat_hypertable_id, watermark) FROM stdin; +SELECT pg_catalog.setval('_timescaledb_catalog.dimension_id_seq', 8, true); + setval +-------- + 8 +(1 row) + +SELECT pg_catalog.setval('_timescaledb_catalog.hypertable_id_seq', 8, true); + setval +-------- + 8 +(1 row) + +CREATE INDEX _materialized_hypertable_6_bucket_idx ON _timescaledb_internal._materialized_hypertable_6 USING btree (bucket DESC); +CREATE INDEX _materialized_hypertable_7_bucket_idx ON _timescaledb_internal._materialized_hypertable_7 USING btree (bucket DESC); +CREATE INDEX _materialized_hypertable_8_bucket_idx ON _timescaledb_internal._materialized_hypertable_8 USING btree (bucket DESC); +CREATE INDEX conditions_time_idx ON public.conditions USING btree ("time" DESC); +CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_6 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker(); +CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON 
_timescaledb_internal._materialized_hypertable_7 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker(); +CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_8 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker(); +CREATE TRIGGER ts_cagg_invalidation_trigger AFTER INSERT OR DELETE OR UPDATE ON public.conditions FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.continuous_agg_invalidation_trigger('5'); +CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON public.conditions FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker(); +SELECT timescaledb_post_restore(); + timescaledb_post_restore +-------------------------- + t +(1 row) + +-- Make sure no scheduled job will be executed during the regression tests +SELECT _timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t (1 row) - \else - SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10); - \endif -\endif \if :IS_TIME_DIMENSION INSERT INTO conditions ("time", temperature) SELECT @@ -764,85 +1015,24 @@ psql:include/cagg_migrate_common.sql:26: WARNING: column type "timestamp withou SELECT coalesce(max(time), 0) FROM public.conditions $$; - \if :IS_DISTRIBUTED - SELECT - 'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM public.conditions $$;' AS "STMT" - \gset - CALL distributed_exec (:'STMT'); - \endif - SELECT set_integer_now_func('conditions', 'integer_now'); INSERT INTO conditions ("time", temperature) SELECT generate_series(1, 1000, 1), 0.25; \endif --- new cagg format (finalized=true) -CREATE MATERIALIZED VIEW conditions_summary_daily_new -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT -\if :IS_TIME_DIMENSION - time_bucket(INTERVAL '1 day', "time") AS bucket, -\else - time_bucket(INTEGER '24', "time") AS bucket, -\endif - MIN(temperature), - MAX(temperature), - AVG(temperature), - SUM(temperature) -FROM - conditions -GROUP BY - bucket -WITH NO DATA; --- older continuous aggregate to be migrated -CREATE MATERIALIZED VIEW conditions_summary_daily -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT -\if :IS_TIME_DIMENSION - time_bucket(INTERVAL '1 day', "time") AS bucket, -\else - time_bucket(INTEGER '24', "time") AS bucket, -\endif - MIN(temperature), - MAX(temperature), - AVG(temperature), - SUM(temperature) -FROM - conditions -GROUP BY - bucket; -psql:include/cagg_migrate_common.sql:95: NOTICE: refreshing continuous aggregate "conditions_summary_daily" --- for permission tests -CREATE MATERIALIZED VIEW conditions_summary_weekly -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT -\if :IS_TIME_DIMENSION - time_bucket(INTERVAL '1 week', "time") AS bucket, -\else - time_bucket(INTEGER '168', "time") AS bucket, -\endif - MIN(temperature), - MAX(temperature), - AVG(temperature), - SUM(temperature) -FROM - conditions -GROUP BY - bucket; -psql:include/cagg_migrate_common.sql:113: NOTICE: refreshing continuous aggregate "conditions_summary_weekly" +CALL refresh_continuous_aggregate('conditions_summary_daily', NULL, NULL); +CALL refresh_continuous_aggregate('conditions_summary_weekly', NULL, NULL); \set ON_ERROR_STOP 0 -- should fail because we don't need to migrate finalized caggs CALL cagg_migrate('conditions_summary_daily_new'); 
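-- Why the CALL above fails (an illustrative sketch, not part of the recorded test
-- output): caggs created in the new format carry finalized = true in the catalog,
-- and cagg_migrate refuses to build a migration plan for them. The flag can be
-- inspected with a query along these lines:
SELECT user_view_schema, user_view_name, finalized
FROM _timescaledb_catalog.continuous_agg
WHERE user_view_name = 'conditions_summary_daily_new';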
-psql:include/cagg_migrate_common.sql:117: ERROR: continuous aggregate "public.conditions_summary_daily_new" does not require any migration -\set ON_ERROR_STOP 1 -\set ON_ERROR_STOP 0 +psql:include/cagg_migrate_common.sql:46: ERROR: continuous aggregate "public.conditions_summary_daily_new" does not require any migration -- should fail because the relation does not exist CALL cagg_migrate('conditions_summary_not_cagg'); -psql:include/cagg_migrate_common.sql:122: ERROR: relation "conditions_summary_not_cagg" does not exist at character 19 +psql:include/cagg_migrate_common.sql:49: ERROR: relation "conditions_summary_not_cagg" does not exist at character 19 CREATE TABLE conditions_summary_not_cagg(); -- should fail because the continuous aggregate does not exist CALL cagg_migrate('conditions_summary_not_cagg'); -psql:include/cagg_migrate_common.sql:125: ERROR: continuous aggregate "public.conditions_summary_not_cagg" does not exist +psql:include/cagg_migrate_common.sql:54: ERROR: continuous aggregate "public.conditions_summary_not_cagg" does not exist \set ON_ERROR_STOP 1 DROP TABLE conditions_summary_not_cagg; SELECT @@ -862,7 +1052,7 @@ WHERE \set ON_ERROR_STOP 0 -- should fail because the new cagg with suffix '_new' already exists CALL cagg_migrate('conditions_summary_daily'); -psql:include/cagg_migrate_common.sql:147: ERROR: continuous aggregate "public.conditions_summary_daily_new" already exists +psql:include/cagg_migrate_common.sql:76: ERROR: continuous aggregate "public.conditions_summary_daily_new" already exists \set ON_ERROR_STOP 1 -- remove the new cagg to execute the migration DROP MATERIALIZED VIEW conditions_summary_daily_new; @@ -877,79 +1067,79 @@ SELECT CALL _timescaledb_functions.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new'); \x on SELECT mat_hypertable_id, user_view_definition FROM _timescaledb_catalog.continuous_agg_migrate_plan; --[ RECORD 1 ]--------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -mat_hypertable_id | 16 -user_view_definition | SELECT _materialized_hypertable_16.bucket, + - | _timescaledb_functions.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_16.agg_2_2, NULL::numeric) AS min, + - | _timescaledb_functions.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_16.agg_3_3, NULL::numeric) AS max, + - | _timescaledb_functions.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_16.agg_4_4, NULL::numeric) AS avg, + - | _timescaledb_functions.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_16.agg_5_5, NULL::numeric) AS sum + - | FROM _timescaledb_internal._materialized_hypertable_16 + - | WHERE (_materialized_hypertable_16.bucket < COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(16)), '-infinity'::timestamp without time zone))+ - | GROUP BY _materialized_hypertable_16.bucket + - | UNION ALL + - | SELECT public.time_bucket('@ 1 day'::interval, conditions."time") AS bucket, + - | min(conditions.temperature) AS min, + - | max(conditions.temperature) AS max, + - | avg(conditions.temperature) AS avg, + - | sum(conditions.temperature) AS sum + - | FROM 
public.conditions + - | WHERE (conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(16)), '-infinity'::timestamp without time zone)) + +-[ RECORD 1 ]--------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +mat_hypertable_id | 7 +user_view_definition | SELECT _materialized_hypertable_7.bucket, + + | _timescaledb_functions.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_7.agg_2_2, NULL::numeric) AS min, + + | _timescaledb_functions.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_7.agg_3_3, NULL::numeric) AS max, + + | _timescaledb_functions.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_7.agg_4_4, NULL::numeric) AS avg, + + | _timescaledb_functions.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_7.agg_5_5, NULL::numeric) AS sum + + | FROM _timescaledb_internal._materialized_hypertable_7 + + | WHERE (_materialized_hypertable_7.bucket < COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(7)), '-infinity'::timestamp without time zone))+ + | GROUP BY _materialized_hypertable_7.bucket + + | UNION ALL + + | SELECT public.time_bucket('@ 1 day'::interval, conditions."time") AS bucket, + + | min(conditions.temperature) AS min, + + | max(conditions.temperature) AS max, + + | avg(conditions.temperature) AS avg, + + | sum(conditions.temperature) AS sum + + | FROM public.conditions + + | WHERE (conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(7)), '-infinity'::timestamp without time zone)) + | GROUP BY (public.time_bucket('@ 1 day'::interval, conditions."time")); \x off SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id; mat_hypertable_id | step_id | status | type | config -------------------+---------+-------------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 16 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"} - 16 | 2 | NOT STARTED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"} - 16 | 3 | NOT STARTED | DISABLE POLICIES | {"policies": null} - 16 | 4 | NOT STARTED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp without time zone"} - 16 | 5 | NOT STARTED | COPY DATA | {"end_ts": "Fri Mar 11 00:00:00 2022", "start_ts": "Fri Dec 31 00:00:00 2021", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 6 | NOT STARTED | COPY DATA | {"end_ts": "Fri May 20 00:00:00 2022", "start_ts": "Fri Mar 11 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 7 | NOT STARTED 
| COPY DATA | {"end_ts": "Fri Jul 29 00:00:00 2022", "start_ts": "Fri May 20 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 8 | NOT STARTED | COPY DATA | {"end_ts": "Fri Oct 07 00:00:00 2022", "start_ts": "Fri Jul 29 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 9 | NOT STARTED | COPY DATA | {"end_ts": "Fri Dec 16 00:00:00 2022", "start_ts": "Fri Oct 07 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 10 | NOT STARTED | COPY DATA | {"end_ts": "Fri Feb 24 00:00:00 2023", "start_ts": "Fri Dec 16 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 11 | NOT STARTED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"} - 16 | 12 | NOT STARTED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"} - 16 | 13 | NOT STARTED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"} - 16 | 14 | NOT STARTED | ENABLE POLICIES | + 7 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"} + 7 | 2 | NOT STARTED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"} + 7 | 3 | NOT STARTED | DISABLE POLICIES | {"policies": null} + 7 | 4 | NOT STARTED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp without time zone"} + 7 | 5 | NOT STARTED | COPY DATA | {"end_ts": "Fri Mar 11 00:00:00 2022", "start_ts": "Fri Dec 31 00:00:00 2021", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 6 | NOT STARTED | COPY DATA | {"end_ts": "Fri May 20 00:00:00 2022", "start_ts": "Fri Mar 11 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 7 | NOT STARTED | COPY DATA | {"end_ts": "Fri Jul 29 00:00:00 2022", "start_ts": "Fri May 20 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 8 | NOT STARTED | COPY DATA | {"end_ts": "Fri Oct 07 00:00:00 2022", "start_ts": "Fri Jul 29 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 9 | NOT STARTED | COPY DATA | {"end_ts": "Fri Dec 16 00:00:00 2022", "start_ts": "Fri Oct 07 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 10 | NOT STARTED | COPY DATA | {"end_ts": "Fri Feb 24 00:00:00 2023", "start_ts": "Fri Dec 16 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 11 | NOT STARTED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"} + 7 | 12 | NOT STARTED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"} + 7 | 
13 | NOT STARTED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"} + 7 | 14 | NOT STARTED | ENABLE POLICIES | (14 rows) -- should resume the execution CALL cagg_migrate('conditions_summary_daily'); -psql:include/cagg_migrate_common.sql:169: WARNING: resuming the migration of the continuous aggregate "public.conditions_summary_daily" -psql:include/cagg_migrate_common.sql:169: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);" +psql:include/cagg_migrate_common.sql:98: WARNING: resuming the migration of the continuous aggregate "public.conditions_summary_daily" +psql:include/cagg_migrate_common.sql:98: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);" SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id; mat_hypertable_id | step_id | status | type | config -------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 16 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"} - 16 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"} - 16 | 3 | FINISHED | DISABLE POLICIES | {"policies": null} - 16 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp without time zone"} - 16 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 00:00:00 2022", "start_ts": "Fri Dec 31 00:00:00 2021", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 00:00:00 2022", "start_ts": "Fri Mar 11 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 00:00:00 2022", "start_ts": "Fri May 20 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 00:00:00 2022", "start_ts": "Fri Jul 29 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 00:00:00 2022", "start_ts": "Fri Oct 07 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 00:00:00 2023", "start_ts": "Fri Dec 16 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": 
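-- Resume semantics (an illustrative sketch, not part of the recorded test output):
-- the CALL above re-reads the plan stored in
-- _timescaledb_catalog.continuous_agg_migrate_plan_step and executes only the steps
-- that are not yet FINISHED, so an interrupted migration is safe to re-run.
-- The remaining work can be listed with a query such as:
SELECT step_id, type, config
FROM _timescaledb_catalog.continuous_agg_migrate_plan_step
WHERE status <> 'FINISHED'
ORDER BY step_id;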
"conditions_summary_daily_new"} - 16 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"} - 16 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"} - 16 | 14 | FINISHED | ENABLE POLICIES | + 7 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"} + 7 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"} + 7 | 3 | FINISHED | DISABLE POLICIES | {"policies": null} + 7 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp without time zone"} + 7 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 00:00:00 2022", "start_ts": "Fri Dec 31 00:00:00 2021", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 00:00:00 2022", "start_ts": "Fri Mar 11 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 00:00:00 2022", "start_ts": "Fri May 20 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 00:00:00 2022", "start_ts": "Fri Jul 29 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 00:00:00 2022", "start_ts": "Fri Oct 07 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 00:00:00 2023", "start_ts": "Fri Dec 16 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"} + 7 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"} + 7 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"} + 7 | 14 | FINISHED | ENABLE POLICIES | (14 rows) \set ON_ERROR_STOP 0 -- should error because plan already exists CALL _timescaledb_functions.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new'); -psql:include/cagg_migrate_common.sql:174: ERROR: plan already exists for materialized hypertable 16 +psql:include/cagg_migrate_common.sql:103: ERROR: plan already exists for materialized hypertable 7 CALL cagg_migrate('conditions_summary_daily'); -psql:include/cagg_migrate_common.sql:175: ERROR: plan already exists for continuous aggregate public.conditions_summary_daily +psql:include/cagg_migrate_common.sql:104: ERROR: plan already exists for continuous aggregate public.conditions_summary_daily \set ON_ERROR_STOP 1 -- policies for test ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true); -psql:include/cagg_migrate_common.sql:179: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:108: 
NOTICE: defaulting compress_orderby to bucket \if :IS_TIME_DIMENSION SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval); add_retention_policy @@ -979,21 +1169,21 @@ FROM timescaledb_information.jobs WHERE hypertable_schema = :'MAT_SCHEMA_NAME' AND hypertable_name = :'MAT_TABLE_NAME' AND job_id >= 1000; - job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name ---------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------------------------------------------------------------------------+------------+---------------+-----------------------+-----------------------------+------------------------+------------------------------------------- - 1014 | Compression Policy [1014] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | {"hypertable_id": 16, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_16 | _timescaledb_functions | policy_compression_check - 1013 | Refresh Continuous Aggregate Policy [1013] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 16} | | | _timescaledb_internal | _materialized_hypertable_16 | _timescaledb_functions | policy_refresh_continuous_aggregate_check - 1012 | Retention Policy [1012] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 16} | | | _timescaledb_internal | _materialized_hypertable_16 | _timescaledb_functions | policy_retention_check + job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name +--------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+--------------------------------------------------------------------------------+------------+---------------+-----------------------+----------------------------+------------------------+------------------------------------------- + 1014 | Compression Policy [1014] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | {"hypertable_id": 7, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_7 | _timescaledb_functions | policy_compression_check + 1013 | Refresh Continuous Aggregate Policy [1013] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 7} | | | _timescaledb_internal | _materialized_hypertable_7 | _timescaledb_functions | policy_refresh_continuous_aggregate_check + 1012 | Retention Policy [1012] | @ 1 day | @ 5 mins | -1 | @ 5 mins | 
_timescaledb_functions | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 7} | | | _timescaledb_internal | _materialized_hypertable_7 | _timescaledb_functions | policy_retention_check (3 rows) -- execute the migration DROP MATERIALIZED VIEW conditions_summary_daily_new; -psql:include/cagg_migrate_common.sql:198: NOTICE: drop cascades to 6 other objects +psql:include/cagg_migrate_common.sql:127: NOTICE: drop cascades to 6 other objects TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE; -psql:include/cagg_migrate_common.sql:199: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" +psql:include/cagg_migrate_common.sql:128: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" CALL cagg_migrate('conditions_summary_daily'); -psql:include/cagg_migrate_common.sql:200: NOTICE: defaulting compress_orderby to bucket -psql:include/cagg_migrate_common.sql:200: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);" +psql:include/cagg_migrate_common.sql:129: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:129: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);" SELECT ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID", h.schema_name AS "NEW_MAT_SCHEMA_NAME", @@ -1018,13 +1208,13 @@ WHERE avg | numeric | | | | main | sum | numeric | | | | main | View definition: - SELECT _materialized_hypertable_20.bucket, - _materialized_hypertable_20.min, - _materialized_hypertable_20.max, - _materialized_hypertable_20.avg, - _materialized_hypertable_20.sum - FROM _timescaledb_internal._materialized_hypertable_20 - WHERE _materialized_hypertable_20.bucket < COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(20)), '-infinity'::timestamp without time zone) + SELECT _materialized_hypertable_11.bucket, + _materialized_hypertable_11.min, + _materialized_hypertable_11.max, + _materialized_hypertable_11.avg, + _materialized_hypertable_11.sum + FROM _timescaledb_internal._materialized_hypertable_11 + WHERE _materialized_hypertable_11.bucket < COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(11)), '-infinity'::timestamp without time zone) UNION ALL SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket, min(conditions.temperature) AS min, @@ -1032,7 +1222,7 @@ UNION ALL avg(conditions.temperature) AS avg, sum(conditions.temperature) AS sum FROM conditions - WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(20)), '-infinity'::timestamp without time zone) + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(11)), '-infinity'::timestamp without time zone) GROUP BY (time_bucket('@ 1 day'::interval, conditions."time")); SELECT * @@ -1042,28 +1232,28 @@ AND hypertable_name = :'NEW_MAT_TABLE_NAME' AND job_id >= 1000; job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | 
fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name --------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------------------------------------------------------------------------+------------+---------------+-----------------------+-----------------------------+------------------------+------------------------------------------- - 1017 | Compression Policy [1017] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | {"hypertable_id": 20, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_20 | _timescaledb_functions | policy_compression_check - 1016 | Refresh Continuous Aggregate Policy [1016] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 20} | | | _timescaledb_internal | _materialized_hypertable_20 | _timescaledb_functions | policy_refresh_continuous_aggregate_check - 1015 | Retention Policy [1015] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 20} | | | _timescaledb_internal | _materialized_hypertable_20 | _timescaledb_functions | policy_retention_check + 1017 | Compression Policy [1017] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | {"hypertable_id": 11, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_11 | _timescaledb_functions | policy_compression_check + 1016 | Refresh Continuous Aggregate Policy [1016] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 11} | | | _timescaledb_internal | _materialized_hypertable_11 | _timescaledb_functions | policy_refresh_continuous_aggregate_check + 1015 | Retention Policy [1015] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 11} | | | _timescaledb_internal | _materialized_hypertable_11 | _timescaledb_functions | policy_retention_check (3 rows) SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id; mat_hypertable_id | step_id | status | type | config -------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 16 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"} - 16 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"} - 16 | 3 | FINISHED | DISABLE POLICIES | {"policies": [1014, 1012]} - 16 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp without time zone"} - 16 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 00:00:00 2022", 
"start_ts": "Fri Dec 31 00:00:00 2021", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 00:00:00 2022", "start_ts": "Fri Mar 11 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 00:00:00 2022", "start_ts": "Fri May 20 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 00:00:00 2022", "start_ts": "Fri Jul 29 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 00:00:00 2022", "start_ts": "Fri Oct 07 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 00:00:00 2023", "start_ts": "Fri Dec 16 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} - 16 | 11 | FINISHED | COPY POLICIES | {"policies": [1014, 1013, 1012], "cagg_name_new": "conditions_summary_daily_new"} - 16 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"} - 16 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"} - 16 | 14 | FINISHED | ENABLE POLICIES | {"policies": [1015, 1016, 1017, 1014, 1013, 1012]} + 7 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"} + 7 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"} + 7 | 3 | FINISHED | DISABLE POLICIES | {"policies": [1014, 1012]} + 7 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp without time zone"} + 7 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 00:00:00 2022", "start_ts": "Fri Dec 31 00:00:00 2021", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 00:00:00 2022", "start_ts": "Fri Mar 11 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 00:00:00 2022", "start_ts": "Fri May 20 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 00:00:00 2022", "start_ts": "Fri Jul 29 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 00:00:00 2022", "start_ts": "Fri Oct 07 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 10 | FINISHED | 
COPY DATA | {"end_ts": "Fri Feb 24 00:00:00 2023", "start_ts": "Fri Dec 16 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"} + 7 | 11 | FINISHED | COPY POLICIES | {"policies": [1014, 1013, 1012], "cagg_name_new": "conditions_summary_daily_new"} + 7 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"} + 7 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"} + 7 | 14 | FINISHED | ENABLE POLICIES | {"policies": [1015, 1016, 1017, 1014, 1013, 1012]} (14 rows) -- check migrated data. should return 0 (zero) rows @@ -1076,25 +1266,25 @@ SELECT * FROM conditions_summary_daily_new; -- compress both caggs SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily') c ORDER BY c::regclass::text; - compress_chunk -------------------------------------------- - _timescaledb_internal._hyper_16_237_chunk - _timescaledb_internal._hyper_16_238_chunk - _timescaledb_internal._hyper_16_239_chunk - _timescaledb_internal._hyper_16_240_chunk - _timescaledb_internal._hyper_16_241_chunk - _timescaledb_internal._hyper_16_242_chunk + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_7_237_chunk + _timescaledb_internal._hyper_7_238_chunk + _timescaledb_internal._hyper_7_239_chunk + _timescaledb_internal._hyper_7_240_chunk + _timescaledb_internal._hyper_7_241_chunk + _timescaledb_internal._hyper_7_242_chunk (6 rows) SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily_new') c ORDER BY c::regclass::text; compress_chunk ------------------------------------------- - _timescaledb_internal._hyper_20_255_chunk - _timescaledb_internal._hyper_20_256_chunk - _timescaledb_internal._hyper_20_257_chunk - _timescaledb_internal._hyper_20_258_chunk - _timescaledb_internal._hyper_20_259_chunk - _timescaledb_internal._hyper_20_260_chunk + _timescaledb_internal._hyper_11_255_chunk + _timescaledb_internal._hyper_11_256_chunk + _timescaledb_internal._hyper_11_257_chunk + _timescaledb_internal._hyper_11_258_chunk + _timescaledb_internal._hyper_11_259_chunk + _timescaledb_internal._hyper_11_260_chunk (6 rows) -- check migrated data after compression. 
should return 0 (zero) rows @@ -1112,21 +1302,21 @@ JOIN _timescaledb_catalog.continuous_agg ON mat_hypertable_id = hypertable_id ORDER BY bgw_job.id; -- test migration overriding the new cagg and keeping the old DROP MATERIALIZED VIEW conditions_summary_daily_new; -psql:include/cagg_migrate_common.sql:248: NOTICE: drop cascades to 6 other objects +psql:include/cagg_migrate_common.sql:177: NOTICE: drop cascades to 6 other objects TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE; -psql:include/cagg_migrate_common.sql:249: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" +psql:include/cagg_migrate_common.sql:178: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" -- check policies before the migration SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily'; - schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone ---------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+-------------------------------------------+---------- - public | conditions_summary_daily | 1012 | Retention Policy [1012] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 16 | {"drop_after": "@ 30 days", "hypertable_id": 16} | _timescaledb_functions | policy_retention_check | - public | conditions_summary_daily | 1013 | Refresh Continuous Aggregate Policy [1013] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 16 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 16} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | - public | conditions_summary_daily | 1014 | Compression Policy [1014] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 16 | {"hypertable_id": 16, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | + schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+------------------------+-------------------------------------------+---------- + public | conditions_summary_daily | 1012 | Retention Policy [1012] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 7 | {"drop_after": "@ 30 days", "hypertable_id": 7} | _timescaledb_functions | policy_retention_check | + public | 
conditions_summary_daily | 1013 | Refresh Continuous Aggregate Policy [1013] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 7 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 7} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | + public | conditions_summary_daily | 1014 | Compression Policy [1014] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 7 | {"hypertable_id": 7, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | (3 rows) CALL cagg_migrate('conditions_summary_daily', override => TRUE); -psql:include/cagg_migrate_common.sql:252: NOTICE: defaulting compress_orderby to bucket -psql:include/cagg_migrate_common.sql:252: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);" +psql:include/cagg_migrate_common.sql:181: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:181: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);" -- cagg with the new format because it was overridden \d+ conditions_summary_daily View "public.conditions_summary_daily" @@ -1138,13 +1328,13 @@ psql:include/cagg_migrate_common.sql:252: WARNING: refresh the continuous aggre avg | numeric | | | | main | sum | numeric | | | | main | View definition: - SELECT _materialized_hypertable_22.bucket, - _materialized_hypertable_22.min, - _materialized_hypertable_22.max, - _materialized_hypertable_22.avg, - _materialized_hypertable_22.sum - FROM _timescaledb_internal._materialized_hypertable_22 - WHERE _materialized_hypertable_22.bucket < COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(22)), '-infinity'::timestamp without time zone) + SELECT _materialized_hypertable_13.bucket, + _materialized_hypertable_13.min, + _materialized_hypertable_13.max, + _materialized_hypertable_13.avg, + _materialized_hypertable_13.sum + FROM _timescaledb_internal._materialized_hypertable_13 + WHERE _materialized_hypertable_13.bucket < COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(13)), '-infinity'::timestamp without time zone) UNION ALL SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket, min(conditions.temperature) AS min, @@ -1152,7 +1342,7 @@ UNION ALL avg(conditions.temperature) AS avg, sum(conditions.temperature) AS sum FROM conditions - WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(22)), '-infinity'::timestamp without time zone) + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(13)), '-infinity'::timestamp without time zone) GROUP BY (time_bucket('@ 1 day'::interval, conditions."time")); -- cagg with the old format because it was overridden @@ -1166,14 +1356,14 @@ UNION ALL avg | numeric | | | | main | sum | numeric | | | | main | View definition: - SELECT _materialized_hypertable_16.bucket, - 
_timescaledb_functions.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_16.agg_2_2, NULL::numeric) AS min, - _timescaledb_functions.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_16.agg_3_3, NULL::numeric) AS max, - _timescaledb_functions.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_16.agg_4_4, NULL::numeric) AS avg, - _timescaledb_functions.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_16.agg_5_5, NULL::numeric) AS sum - FROM _timescaledb_internal._materialized_hypertable_16 - WHERE _materialized_hypertable_16.bucket < COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(16)), '-infinity'::timestamp without time zone) - GROUP BY _materialized_hypertable_16.bucket + SELECT _materialized_hypertable_7.bucket, + _timescaledb_functions.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_7.agg_2_2, NULL::numeric) AS min, + _timescaledb_functions.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_7.agg_3_3, NULL::numeric) AS max, + _timescaledb_functions.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_7.agg_4_4, NULL::numeric) AS avg, + _timescaledb_functions.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_7.agg_5_5, NULL::numeric) AS sum + FROM _timescaledb_internal._materialized_hypertable_7 + WHERE _materialized_hypertable_7.bucket < COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(7)), '-infinity'::timestamp without time zone) + GROUP BY _materialized_hypertable_7.bucket UNION ALL SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket, min(conditions.temperature) AS min, @@ -1181,30 +1371,30 @@ UNION ALL avg(conditions.temperature) AS avg, sum(conditions.temperature) AS sum FROM conditions - WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(16)), '-infinity'::timestamp without time zone) + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(7)), '-infinity'::timestamp without time zone) GROUP BY (time_bucket('@ 1 day'::interval, conditions."time")); \set ON_ERROR_STOP 0 -- should fail because the cagg was overridden SELECT * FROM conditions_summary_daily_new; -psql:include/cagg_migrate_common.sql:259: ERROR: relation "conditions_summary_daily_new" does not exist at character 15 +psql:include/cagg_migrate_common.sql:188: ERROR: relation "conditions_summary_daily_new" does not exist at character 15 \set ON_ERROR_STOP 1 -- check policies after the migration SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily'; schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone 
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+-------------------------------------------+---------- - public | conditions_summary_daily | 1018 | Retention Policy [1018] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 22 | {"drop_after": "@ 30 days", "hypertable_id": 22} | _timescaledb_functions | policy_retention_check | - public | conditions_summary_daily | 1019 | Refresh Continuous Aggregate Policy [1019] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 22 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 22} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | - public | conditions_summary_daily | 1020 | Compression Policy [1020] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 22 | {"hypertable_id": 22, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | + public | conditions_summary_daily | 1018 | Retention Policy [1018] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 13 | {"drop_after": "@ 30 days", "hypertable_id": 13} | _timescaledb_functions | policy_retention_check | + public | conditions_summary_daily | 1019 | Refresh Continuous Aggregate Policy [1019] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 13 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 13} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | + public | conditions_summary_daily | 1020 | Compression Policy [1020] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 13 | {"hypertable_id": 13, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | (3 rows) -- should return the old cagg jobs SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_old'; - schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone ---------+------------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+-------------------------------------------+---------- - public | conditions_summary_daily_old | 1012 | Retention Policy [1012] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 16 | {"drop_after": "@ 30 days", "hypertable_id": 16} | _timescaledb_functions | policy_retention_check | - public | conditions_summary_daily_old | 
1013 | Refresh Continuous Aggregate Policy [1013] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 16 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 16} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | - public | conditions_summary_daily_old | 1014 | Compression Policy [1014] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 16 | {"hypertable_id": 16, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | + schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +--------+------------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+------------------------+-------------------------------------------+---------- + public | conditions_summary_daily_old | 1012 | Retention Policy [1012] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 7 | {"drop_after": "@ 30 days", "hypertable_id": 7} | _timescaledb_functions | policy_retention_check | + public | conditions_summary_daily_old | 1013 | Refresh Continuous Aggregate Policy [1013] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 7 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 7} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | + public | conditions_summary_daily_old | 1014 | Compression Policy [1014] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 7 | {"hypertable_id": 7, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | (3 rows) -- should return no rows because the cagg was overwritten @@ -1215,26 +1405,26 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d -- test migration overriding the new cagg and removing the old TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE; -psql:include/cagg_migrate_common.sql:269: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" +psql:include/cagg_migrate_common.sql:198: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" DROP MATERIALIZED VIEW conditions_summary_daily; -psql:include/cagg_migrate_common.sql:270: NOTICE: drop cascades to 6 other objects +psql:include/cagg_migrate_common.sql:199: NOTICE: drop cascades to 6 other objects ALTER MATERIALIZED VIEW conditions_summary_daily_old RENAME TO conditions_summary_daily; -- check policies before the migration SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily'; - schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone 
---------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+-------------------------------------------+---------- - public | conditions_summary_daily | 1012 | Retention Policy [1012] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 16 | {"drop_after": "@ 30 days", "hypertable_id": 16} | _timescaledb_functions | policy_retention_check | - public | conditions_summary_daily | 1013 | Refresh Continuous Aggregate Policy [1013] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 16 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 16} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | - public | conditions_summary_daily | 1014 | Compression Policy [1014] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 16 | {"hypertable_id": 16, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | + schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+------------------------+-------------------------------------------+---------- + public | conditions_summary_daily | 1012 | Retention Policy [1012] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 7 | {"drop_after": "@ 30 days", "hypertable_id": 7} | _timescaledb_functions | policy_retention_check | + public | conditions_summary_daily | 1013 | Refresh Continuous Aggregate Policy [1013] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 7 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 7} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | + public | conditions_summary_daily | 1014 | Compression Policy [1014] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 7 | {"hypertable_id": 7, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | (3 rows) CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE); -psql:include/cagg_migrate_common.sql:274: NOTICE: defaulting compress_orderby to bucket -psql:include/cagg_migrate_common.sql:274: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);" 
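-- Flag semantics (an illustrative note, not part of the recorded test output):
-- override => TRUE makes the migrated cagg take over the name
-- 'conditions_summary_daily' while the old one is renamed with an '_old' suffix;
-- drop_old => TRUE then drops that renamed cagg, which is why the NOTICEs below
-- report its policy jobs as not found. To keep the old cagg around instead:
-- CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => FALSE);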
-psql:include/cagg_migrate_common.sql:274: NOTICE: drop cascades to 6 other objects
-psql:include/cagg_migrate_common.sql:274: NOTICE: job 1014 not found, skipping
-psql:include/cagg_migrate_common.sql:274: NOTICE: job 1013 not found, skipping
-psql:include/cagg_migrate_common.sql:274: NOTICE: job 1012 not found, skipping
+psql:include/cagg_migrate_common.sql:203: NOTICE: defaulting compress_orderby to bucket
+psql:include/cagg_migrate_common.sql:203: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
+psql:include/cagg_migrate_common.sql:203: NOTICE: drop cascades to 6 other objects
+psql:include/cagg_migrate_common.sql:203: NOTICE: job 1014 not found, skipping
+psql:include/cagg_migrate_common.sql:203: NOTICE: job 1013 not found, skipping
+psql:include/cagg_migrate_common.sql:203: NOTICE: job 1012 not found, skipping
-- cagg with the new format because it was overridden
\d+ conditions_summary_daily
View "public.conditions_summary_daily"
@@ -1246,13 +1436,13 @@ psql:include/cagg_migrate_common.sql:274: NOTICE: job 1012 not found, skipping
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
- SELECT _materialized_hypertable_24.bucket,
- _materialized_hypertable_24.min,
- _materialized_hypertable_24.max,
- _materialized_hypertable_24.avg,
- _materialized_hypertable_24.sum
- FROM _timescaledb_internal._materialized_hypertable_24
- WHERE _materialized_hypertable_24.bucket < COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(24)), '-infinity'::timestamp without time zone)
+ SELECT _materialized_hypertable_15.bucket,
+ _materialized_hypertable_15.min,
+ _materialized_hypertable_15.max,
+ _materialized_hypertable_15.avg,
+ _materialized_hypertable_15.sum
+ FROM _timescaledb_internal._materialized_hypertable_15
+ WHERE _materialized_hypertable_15.bucket < COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(15)), '-infinity'::timestamp without time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
@@ -1260,24 +1450,24 @@ UNION ALL
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
- WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(24)), '-infinity'::timestamp without time zone)
+ WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(15)), '-infinity'::timestamp without time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
\set ON_ERROR_STOP 0
-- should fail because the cagg was overridden
SELECT * FROM conditions_summary_daily_new;
-psql:include/cagg_migrate_common.sql:279: ERROR: relation "conditions_summary_daily_new" does not exist at character 15
+psql:include/cagg_migrate_common.sql:208: ERROR: relation "conditions_summary_daily_new" does not exist at character 15
-- should fail because the old cagg was removed
SELECT * FROM conditions_summary_daily_old;
-psql:include/cagg_migrate_common.sql:281: ERROR: relation "conditions_summary_daily_old" does not exist at character 15
+psql:include/cagg_migrate_common.sql:210: ERROR: relation "conditions_summary_daily_old" does not exist at character 15
\set ON_ERROR_STOP 1
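For readers following the expected output above, the override path can be summarized in isolation. A minimal sketch, reusing the test fixture's names (a real deployment would substitute its own continuous aggregate):

-- With override => TRUE the migrated cagg takes over the original name;
-- with drop_old => TRUE the old-format cagg is dropped instead of being
-- kept around under the "<name>_old" suffix.
CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE);
-- Afterwards neither auxiliary relation should resolve, which is
-- exactly what the two failing SELECTs above assert:
SELECT * FROM conditions_summary_daily_new; -- ERROR: relation does not exist
SELECT * FROM conditions_summary_daily_old; -- ERROR: relation does not exist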
-- check policies after the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+-------------------------------------------+----------
- public | conditions_summary_daily | 1021 | Retention Policy [1021] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 24 | {"drop_after": "@ 30 days", "hypertable_id": 24} | _timescaledb_functions | policy_retention_check |
- public | conditions_summary_daily | 1022 | Refresh Continuous Aggregate Policy [1022] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 24 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 24} | _timescaledb_functions | policy_refresh_continuous_aggregate_check |
- public | conditions_summary_daily | 1023 | Compression Policy [1023] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 24 | {"hypertable_id": 24, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check |
+ public | conditions_summary_daily | 1021 | Retention Policy [1021] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 15 | {"drop_after": "@ 30 days", "hypertable_id": 15} | _timescaledb_functions | policy_retention_check |
+ public | conditions_summary_daily | 1022 | Refresh Continuous Aggregate Policy [1022] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 15 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 15} | _timescaledb_functions | policy_refresh_continuous_aggregate_check |
+ public | conditions_summary_daily | 1023 | Compression Policy [1023] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 15 | {"hypertable_id": 15, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check |
(3 rows)
-- should return no rows because the old cagg was removed
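The permission tests in the next hunk establish, one failing call at a time, the grants a non-superuser needs on the migration-plan catalog objects before cagg_migrate can succeed. Collected in one place as a sketch (the role placeholder is the test fixture's psql variable):

-- Grants required before a non-superuser can run cagg_migrate, exactly
-- as the failing calls below establish them one object at a time:
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO :ROLE_DEFAULT_PERM_USER;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO :ROLE_DEFAULT_PERM_USER;
GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO :ROLE_DEFAULT_PERM_USER;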
@@ -1294,14 +1484,14 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d
-- permission tests
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
-psql:include/cagg_migrate_common.sql:291: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
+psql:include/cagg_migrate_common.sql:220: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
GRANT ALL ON TABLE conditions TO :ROLE_DEFAULT_PERM_USER;
ALTER MATERIALIZED VIEW conditions_summary_weekly OWNER TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on 'continuous_agg_migrate_plan' catalog table
CALL cagg_migrate('conditions_summary_weekly');
-psql:include/cagg_migrate_common.sql:298: ERROR: permission denied for table continuous_agg_migrate_plan
+psql:include/cagg_migrate_common.sql:227: ERROR: permission denied for table continuous_agg_migrate_plan
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO :ROLE_DEFAULT_PERM_USER;
@@ -1309,7 +1499,7 @@ SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on 'continuous_agg_migrate_plan_step' catalog table
CALL cagg_migrate('conditions_summary_weekly');
-psql:include/cagg_migrate_common.sql:308: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq
+psql:include/cagg_migrate_common.sql:237: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO :ROLE_DEFAULT_PERM_USER;
@@ -1317,14 +1507,14 @@ SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on 'continuous_agg_migrate_plan_step_step_id_seq' catalog sequence
CALL cagg_migrate('conditions_summary_weekly');
-psql:include/cagg_migrate_common.sql:318: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq
+psql:include/cagg_migrate_common.sql:247: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
-- all necessary permissions granted
CALL cagg_migrate('conditions_summary_weekly');
-psql:include/cagg_migrate_common.sql:327: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_weekly_new', CAST('Mon Jan 02 00:00:00 2023' AS timestamp without time zone), NULL);"
+psql:include/cagg_migrate_common.sql:256: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_weekly_new', CAST('Mon Jan 02 00:00:00 2023' AS timestamp without time zone), NULL);"
-- check migrated data. should return 0 (zero) rows
SELECT * FROM conditions_summary_weekly
EXCEPT
@@ -1336,20 +1526,20 @@ SELECT * FROM conditions_summary_weekly_new;
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- 17 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Mon Jan 02 00:00:00 2023"}
- 17 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_weekly_new"}
- 17 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
- 17 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Mon Jan 02 00:00:00 2023", "cagg_name_new": "conditions_summary_weekly_new", "window_start_type": "timestamp without time zone"}
- 17 | 5 | FINISHED | COPY DATA | {"end_ts": "Mon Mar 07 00:00:00 2022", "start_ts": "Mon Dec 27 00:00:00 2021", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
- 17 | 6 | FINISHED | COPY DATA | {"end_ts": "Mon May 16 00:00:00 2022", "start_ts": "Mon Mar 07 00:00:00 2022", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
- 17 | 7 | FINISHED | COPY DATA | {"end_ts": "Mon Jul 25 00:00:00 2022", "start_ts": "Mon May 16 00:00:00 2022", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
- 17 | 8 | FINISHED | COPY DATA | {"end_ts": "Mon Oct 03 00:00:00 2022", "start_ts": "Mon Jul 25 00:00:00 2022", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
- 17 | 9 | FINISHED | COPY DATA | {"end_ts": "Mon Dec 12 00:00:00 2022", "start_ts": "Mon Oct 03 00:00:00 2022", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
- 17 | 10 | FINISHED | COPY DATA | {"end_ts": "Mon Feb 20 00:00:00 2023", "start_ts": "Mon Dec 12 00:00:00 2022", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
- 17 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_weekly_new"}
- 17 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_weekly_new"}
- 17 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_weekly_new"}
- 17 | 14 | FINISHED | ENABLE POLICIES |
+ 8 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Mon Jan 02 00:00:00 2023"}
+ 8 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_weekly_new"}
+ 8 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
+ 8 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Mon Jan 02 00:00:00 2023", "cagg_name_new": "conditions_summary_weekly_new", "window_start_type": "timestamp without time zone"}
+ 8 | 5 | FINISHED | COPY DATA | {"end_ts": "Mon Mar 07 00:00:00 2022", "start_ts": "Mon Dec 27 00:00:00 2021", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
+ 8 | 6 | FINISHED | COPY DATA | {"end_ts": "Mon May 16 00:00:00 2022", "start_ts": "Mon Mar 07 00:00:00 2022", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
+ 8 | 7 | FINISHED | COPY DATA | {"end_ts": "Mon Jul 25 00:00:00 2022", "start_ts": "Mon May 16 00:00:00 2022", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
+ 8 | 8 | FINISHED | COPY DATA | {"end_ts": "Mon Oct 03 00:00:00 2022", "start_ts": "Mon Jul 25 00:00:00 2022", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
+ 8 | 9 | FINISHED | COPY DATA | {"end_ts": "Mon Dec 12 00:00:00 2022", "start_ts": "Mon Oct 03 00:00:00 2022", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
+ 8 | 10 | FINISHED | COPY DATA | {"end_ts": "Mon Feb 20 00:00:00 2023", "start_ts": "Mon Dec 12 00:00:00 2022", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
+ 8 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_weekly_new"}
+ 8 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_weekly_new"}
+ 8 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_weekly_new"}
+ 8 | 14 | FINISHED | ENABLE POLICIES |
(14 rows)
RESET ROLE;
@@ -1360,14 +1550,14 @@ RESET ROLE;
-- execute transaction control statements. Transaction control statements are only
-- allowed if CALL is executed in its own transaction.`
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
-psql:include/cagg_migrate_common.sql:344: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
+psql:include/cagg_migrate_common.sql:273: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_weekly_new;
-psql:include/cagg_migrate_common.sql:345: NOTICE: drop cascades to 6 other objects
+psql:include/cagg_migrate_common.sql:274: NOTICE: drop cascades to 6 other objects
\set ON_ERROR_STOP 0
BEGIN;
-- should fail with `invalid transaction termination`
CALL cagg_migrate('conditions_summary_weekly');
-psql:include/cagg_migrate_common.sql:350: ERROR: invalid transaction termination
+psql:include/cagg_migrate_common.sql:279: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
CREATE FUNCTION execute_migration() RETURNS void AS
@@ -1383,7 +1573,7 @@ LANGUAGE plpgsql;
BEGIN;
-- should fail with `invalid transaction termination`
SELECT execute_migration();
-psql:include/cagg_migrate_common.sql:367: ERROR: invalid transaction termination
+psql:include/cagg_migrate_common.sql:296: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
-- cleanup
@@ -1391,12 +1581,18 @@ DROP FUNCTION execute_migration();
REVOKE SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan FROM :ROLE_DEFAULT_PERM_USER;
REVOKE USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq FROM :ROLE_DEFAULT_PERM_USER;
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
-psql:include/cagg_migrate_common.sql:375: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
+psql:include/cagg_migrate_common.sql:304: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
-psql:include/cagg_migrate_common.sql:376: NOTICE: drop cascades to 6 other objects
+psql:include/cagg_migrate_common.sql:305: NOTICE: drop cascades to 6 other objects
DROP MATERIALIZED VIEW conditions_summary_weekly;
-psql:include/cagg_migrate_common.sql:377: NOTICE: drop cascades to 6 other objects
+psql:include/cagg_migrate_common.sql:306: NOTICE: drop cascades to 6 other objects
DROP TABLE conditions;
+SELECT _timescaledb_functions.start_background_workers();
+ start_background_workers
+--------------------------
+ t
+(1 row)
+
-- ########################################################
-- ## TIMESTAMPTZ data type tests
-- ########################################################
@@ -1406,35 +1602,185 @@ DROP TABLE conditions;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
-\if :IS_DISTRIBUTED
-\echo 'Running distributed hypertable tests'
-\else
-\echo 'Running local hypertable tests'
-Running local hypertable tests
-\endif
-CREATE TABLE conditions (
- "time" :TIME_DIMENSION_DATATYPE NOT NULL,
- temperature NUMERIC
+-- Setup some variables
+SELECT
+ format('\! zcat include/data/cagg_migrate_%1$s.sql.gz > %2$s/results/cagg_migrate_%1$s.sql', lower(:'TIME_DIMENSION_DATATYPE'), :'TEST_OUTPUT_DIR') AS "ZCAT_CMD",
+ format('%2$s/results/cagg_migrate_%1$s.sql', lower(:'TIME_DIMENSION_DATATYPE'), :'TEST_OUTPUT_DIR') AS "TEST_SCHEMA_FILE"
+\gset
+-- decompress dump file
+:ZCAT_CMD
+-- restore dump
+SELECT timescaledb_pre_restore();
+ timescaledb_pre_restore
+-------------------------
+ t
+(1 row)
+
+\ir :TEST_SCHEMA_FILE
+CREATE TABLE public.conditions (
+ "time" timestamp with time zone NOT NULL,
+ temperature numeric
);
-\if :IS_DISTRIBUTED
- \if :IS_TIME_DIMENSION
- SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2);
- \else
- SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2);
- \endif
-\else
- \if :IS_TIME_DIMENSION
- SELECT table_name FROM create_hypertable('conditions', 'time');
- table_name
------------
- conditions
+CREATE VIEW _timescaledb_internal._direct_view_10 AS
+ SELECT public.time_bucket('1 day'::interval, "time") AS bucket,
+ min(temperature) AS min,
+ max(temperature) AS max,
+ avg(temperature) AS avg,
+ sum(temperature) AS sum
+ FROM public.conditions
+ GROUP BY (public.time_bucket('1 day'::interval, "time"));
+CREATE VIEW _timescaledb_internal._direct_view_11 AS
+ SELECT public.time_bucket('1 day'::interval, "time") AS bucket,
+ min(temperature) AS min,
+ max(temperature) AS max,
+ avg(temperature) AS avg,
+ sum(temperature) AS sum
+ FROM public.conditions
+ GROUP BY (public.time_bucket('1 day'::interval, "time"));
+CREATE VIEW _timescaledb_internal._direct_view_12 AS
+ SELECT public.time_bucket('7 days'::interval, "time") AS bucket,
+ min(temperature) AS min,
+ max(temperature) AS max,
+ avg(temperature) AS avg,
+ sum(temperature) AS sum
+ FROM public.conditions
+ GROUP BY (public.time_bucket('7 days'::interval, "time"));
+CREATE TABLE _timescaledb_internal._materialized_hypertable_10 (
+ bucket timestamp with time zone NOT NULL,
+ min numeric,
+ max numeric,
+ avg numeric,
+ sum numeric
+);
+CREATE TABLE _timescaledb_internal._materialized_hypertable_11 (
+ bucket timestamp with time zone NOT NULL,
+ agg_2_2 bytea,
+ agg_3_3 bytea,
+ agg_4_4 bytea,
+ agg_5_5 bytea,
+ chunk_id integer
+);
+CREATE TABLE _timescaledb_internal._materialized_hypertable_12 (
+ bucket timestamp with time zone NOT NULL,
+ agg_2_2 bytea,
+ agg_3_3 bytea,
+ agg_4_4 bytea,
+ agg_5_5 bytea,
+ chunk_id integer
+);
+CREATE VIEW _timescaledb_internal._partial_view_10 AS
+ SELECT public.time_bucket('1 day'::interval, "time") AS bucket,
+ min(temperature) AS min,
+ max(temperature) AS max,
+ avg(temperature) AS avg,
+ sum(temperature) AS sum
+ FROM public.conditions
+ GROUP BY (public.time_bucket('1 day'::interval, "time"));
+CREATE VIEW _timescaledb_internal._partial_view_11 AS
+ SELECT public.time_bucket('1 day'::interval, "time") AS bucket,
+ _timescaledb_functions.partialize_agg(min(temperature)) AS agg_2_2,
+ _timescaledb_functions.partialize_agg(max(temperature)) AS agg_3_3,
+ _timescaledb_functions.partialize_agg(avg(temperature)) AS agg_4_4,
+ _timescaledb_functions.partialize_agg(sum(temperature)) AS agg_5_5,
+ _timescaledb_functions.chunk_id_from_relid(tableoid) AS chunk_id
+ FROM public.conditions
+ GROUP BY (public.time_bucket('1 day'::interval, "time")), (_timescaledb_functions.chunk_id_from_relid(tableoid));
+CREATE VIEW _timescaledb_internal._partial_view_12 AS
+ SELECT public.time_bucket('7 days'::interval, "time") AS bucket,
+ _timescaledb_functions.partialize_agg(min(temperature)) AS agg_2_2,
+ _timescaledb_functions.partialize_agg(max(temperature)) AS agg_3_3,
+ _timescaledb_functions.partialize_agg(avg(temperature)) AS agg_4_4,
+ _timescaledb_functions.partialize_agg(sum(temperature)) AS agg_5_5,
+ _timescaledb_functions.chunk_id_from_relid(tableoid) AS chunk_id
+ FROM public.conditions
+ GROUP BY (public.time_bucket('7 days'::interval, "time")), (_timescaledb_functions.chunk_id_from_relid(tableoid));
+CREATE VIEW public.conditions_summary_daily AS
+ SELECT _materialized_hypertable_11.bucket,
+ _timescaledb_functions.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_11.agg_2_2, NULL::numeric) AS min,
+ _timescaledb_functions.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_11.agg_3_3, NULL::numeric) AS max,
+ _timescaledb_functions.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_11.agg_4_4, NULL::numeric) AS avg,
+ _timescaledb_functions.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_11.agg_5_5, NULL::numeric) AS sum
+ FROM _timescaledb_internal._materialized_hypertable_11
+ WHERE (_materialized_hypertable_11.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(11)), '-infinity'::timestamp with time zone))
+ GROUP BY _materialized_hypertable_11.bucket
+UNION ALL
+ SELECT public.time_bucket('1 day'::interval, conditions."time") AS bucket,
+ min(conditions.temperature) AS min,
+ max(conditions.temperature) AS max,
+ avg(conditions.temperature) AS avg,
+ sum(conditions.temperature) AS sum
+ FROM public.conditions
+ WHERE (conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(11)), '-infinity'::timestamp with time zone))
+ GROUP BY (public.time_bucket('1 day'::interval, conditions."time"));
+CREATE VIEW public.conditions_summary_daily_new AS
+ SELECT _materialized_hypertable_10.bucket,
+ _materialized_hypertable_10.min,
+ _materialized_hypertable_10.max,
+ _materialized_hypertable_10.avg,
+ _materialized_hypertable_10.sum
+ FROM _timescaledb_internal._materialized_hypertable_10
+ WHERE (_materialized_hypertable_10.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(10)), '-infinity'::timestamp with time zone))
+UNION ALL
+ SELECT public.time_bucket('1 day'::interval, conditions."time") AS bucket,
+ min(conditions.temperature) AS min,
+ max(conditions.temperature) AS max,
+ avg(conditions.temperature) AS avg,
+ sum(conditions.temperature) AS sum
+ FROM public.conditions
+ WHERE (conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(10)), '-infinity'::timestamp with time zone))
+ GROUP BY (public.time_bucket('1 day'::interval, conditions."time"));
+CREATE VIEW public.conditions_summary_weekly AS
+ SELECT _materialized_hypertable_12.bucket,
+ _timescaledb_functions.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_12.agg_2_2, NULL::numeric) AS min,
+ _timescaledb_functions.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_12.agg_3_3, NULL::numeric) AS max,
+ _timescaledb_functions.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_12.agg_4_4, NULL::numeric) AS avg,
+ _timescaledb_functions.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_12.agg_5_5, NULL::numeric) AS sum
+ FROM _timescaledb_internal._materialized_hypertable_12
+ WHERE (_materialized_hypertable_12.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(12)), '-infinity'::timestamp with time zone))
+ GROUP BY _materialized_hypertable_12.bucket
+UNION ALL
+ SELECT public.time_bucket('7 days'::interval, conditions."time") AS bucket,
+ min(conditions.temperature) AS min,
+ max(conditions.temperature) AS max,
+ avg(conditions.temperature) AS avg,
+ sum(conditions.temperature) AS sum
+ FROM public.conditions
+ WHERE (conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(12)), '-infinity'::timestamp with time zone))
+ GROUP BY (public.time_bucket('7 days'::interval, conditions."time"));
+COPY _timescaledb_catalog.hypertable (id, schema_name, table_name, associated_schema_name, associated_table_prefix, num_dimensions, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size, compression_state, compressed_hypertable_id, replication_factor, status) FROM stdin;
+COPY _timescaledb_catalog.dimension (id, hypertable_id, column_name, column_type, aligned, num_slices, partitioning_func_schema, partitioning_func, interval_length, compress_interval_length, integer_now_func_schema, integer_now_func) FROM stdin;
+COPY _timescaledb_catalog.continuous_agg (mat_hypertable_id, raw_hypertable_id, parent_mat_hypertable_id, user_view_schema, user_view_name, partial_view_schema, partial_view_name, bucket_width, direct_view_schema, direct_view_name, materialized_only, finalized) FROM stdin;
+COPY _timescaledb_catalog.continuous_aggs_invalidation_threshold (hypertable_id, watermark) FROM stdin;
+COPY _timescaledb_catalog.continuous_aggs_materialization_invalidation_log (materialization_id, lowest_modified_value, greatest_modified_value) FROM stdin;
+COPY _timescaledb_catalog.continuous_aggs_watermark (mat_hypertable_id, watermark) FROM stdin;
+SELECT pg_catalog.setval('_timescaledb_catalog.dimension_id_seq', 12, true);
+ setval
+--------
+ 12
+(1 row)
+
+CREATE INDEX _materialized_hypertable_10_bucket_idx ON _timescaledb_internal._materialized_hypertable_10 USING btree (bucket DESC);
+CREATE INDEX _materialized_hypertable_11_bucket_idx ON _timescaledb_internal._materialized_hypertable_11 USING btree (bucket DESC);
+CREATE INDEX _materialized_hypertable_12_bucket_idx ON _timescaledb_internal._materialized_hypertable_12 USING btree (bucket DESC);
+CREATE INDEX conditions_time_idx ON public.conditions USING btree ("time" DESC);
+CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_10 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker();
+CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_11 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker();
+CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_12 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker();
+CREATE TRIGGER ts_cagg_invalidation_trigger AFTER INSERT OR DELETE OR UPDATE ON public.conditions FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.continuous_agg_invalidation_trigger('9');
+CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON public.conditions FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker();
+SELECT timescaledb_post_restore();
+ timescaledb_post_restore
+--------------------------
+ t
+(1 row)
+
+-- Make sure no scheduled job will be executed during the regression tests
+SELECT _timescaledb_functions.stop_background_workers();
+ stop_background_workers
+-------------------------
+ t
(1 row)
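The setup above follows the usual TimescaleDB dump/restore pattern; condensed to its essentials (the dump path and psql variable are the test fixture's), the sequence is a sketch like:

-- Enter restore mode, replay the dump, leave restore mode, and keep
-- background jobs quiet for the rest of the regression run.
SELECT timescaledb_pre_restore();
\ir :TEST_SCHEMA_FILE
SELECT timescaledb_post_restore();
SELECT _timescaledb_functions.stop_background_workers();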
- \else
- SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10);
- \endif
-\endif
\if :IS_TIME_DIMENSION
INSERT INTO conditions ("time", temperature)
SELECT
@@ -1447,85 +1793,24 @@ CREATE TABLE conditions (
SELECT coalesce(max(time), 0) FROM public.conditions
$$;
- \if :IS_DISTRIBUTED
- SELECT
- 'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM public.conditions $$;' AS "STMT"
- \gset
- CALL distributed_exec (:'STMT');
- \endif
- SELECT set_integer_now_func('conditions', 'integer_now');
INSERT INTO conditions ("time", temperature)
SELECT generate_series(1, 1000, 1), 0.25;
\endif
--- new cagg format (finalized=true)
-CREATE MATERIALIZED VIEW conditions_summary_daily_new
-WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
-SELECT
-\if :IS_TIME_DIMENSION
- time_bucket(INTERVAL '1 day', "time") AS bucket,
-\else
- time_bucket(INTEGER '24', "time") AS bucket,
-\endif
- MIN(temperature),
- MAX(temperature),
- AVG(temperature),
- SUM(temperature)
-FROM
- conditions
-GROUP BY
- bucket
-WITH NO DATA;
--- older continuous aggregate to be migrated
-CREATE MATERIALIZED VIEW conditions_summary_daily
-WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS
-SELECT
-\if :IS_TIME_DIMENSION
- time_bucket(INTERVAL '1 day', "time") AS bucket,
-\else
- time_bucket(INTEGER '24', "time") AS bucket,
-\endif
- MIN(temperature),
- MAX(temperature),
- AVG(temperature),
- SUM(temperature)
-FROM
- conditions
-GROUP BY
- bucket;
-psql:include/cagg_migrate_common.sql:95: NOTICE: refreshing continuous aggregate "conditions_summary_daily"
--- for permission tests
-CREATE MATERIALIZED VIEW conditions_summary_weekly
-WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS
-SELECT
-\if :IS_TIME_DIMENSION
- time_bucket(INTERVAL '1 week', "time") AS bucket,
-\else
- time_bucket(INTEGER '168', "time") AS bucket,
-\endif
- MIN(temperature),
- MAX(temperature),
- AVG(temperature),
- SUM(temperature)
-FROM
- conditions
-GROUP BY
- bucket;
-psql:include/cagg_migrate_common.sql:113: NOTICE: refreshing continuous aggregate "conditions_summary_weekly"
+CALL refresh_continuous_aggregate('conditions_summary_daily', NULL, NULL);
+CALL refresh_continuous_aggregate('conditions_summary_weekly', NULL, NULL);
\set ON_ERROR_STOP 0
-- should fail because we don't need to migrate finalized caggs
CALL cagg_migrate('conditions_summary_daily_new');
-psql:include/cagg_migrate_common.sql:117: ERROR: continuous aggregate "public.conditions_summary_daily_new" does not require any migration
-\set ON_ERROR_STOP 1
-\set ON_ERROR_STOP 0
+psql:include/cagg_migrate_common.sql:46: ERROR: continuous aggregate "public.conditions_summary_daily_new" does not require any migration
-- should fail because the relation does not exist
CALL cagg_migrate('conditions_summary_not_cagg');
-psql:include/cagg_migrate_common.sql:122: ERROR: relation "conditions_summary_not_cagg" does not exist at character 19
+psql:include/cagg_migrate_common.sql:49: ERROR: relation "conditions_summary_not_cagg" does not exist at character 19
CREATE TABLE conditions_summary_not_cagg();
-- should fail because the continuous aggregate does not exist
CALL cagg_migrate('conditions_summary_not_cagg');
-psql:include/cagg_migrate_common.sql:125: ERROR: continuous aggregate "public.conditions_summary_not_cagg" does not exist
+psql:include/cagg_migrate_common.sql:54: ERROR: continuous aggregate "public.conditions_summary_not_cagg" does not exist
\set ON_ERROR_STOP 1
DROP TABLE conditions_summary_not_cagg;
SELECT
@@ -1545,7 +1830,7 @@ WHERE
\set ON_ERROR_STOP 0
-- should fail because the new cagg with suffix '_new' already exists
CALL cagg_migrate('conditions_summary_daily');
-psql:include/cagg_migrate_common.sql:147: ERROR: continuous aggregate "public.conditions_summary_daily_new" already exists
+psql:include/cagg_migrate_common.sql:76: ERROR: continuous aggregate "public.conditions_summary_daily_new" already exists
\set ON_ERROR_STOP 1
-- remove the new cagg to execute the migration
DROP MATERIALIZED VIEW conditions_summary_daily_new;
@@ -1561,15 +1846,15 @@ CALL _timescaledb_functions.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_s
\x on
SELECT mat_hypertable_id, user_view_definition FROM _timescaledb_catalog.continuous_agg_migrate_plan;
-[ RECORD 1 ]--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-mat_hypertable_id | 29
-user_view_definition | SELECT _materialized_hypertable_29.bucket, +
- | _timescaledb_functions.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_29.agg_2_2, NULL::numeric) AS min,+
- | _timescaledb_functions.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_29.agg_3_3, NULL::numeric) AS max,+
- | _timescaledb_functions.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_29.agg_4_4, NULL::numeric) AS avg,+
- | _timescaledb_functions.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_29.agg_5_5, NULL::numeric) AS sum +
- | FROM _timescaledb_internal._materialized_hypertable_29 +
- | WHERE (_materialized_hypertable_29.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(29)), '-infinity'::timestamp with time zone)) +
- | GROUP BY _materialized_hypertable_29.bucket +
+mat_hypertable_id | 11
+user_view_definition | SELECT _materialized_hypertable_11.bucket, +
+ | _timescaledb_functions.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_11.agg_2_2, NULL::numeric) AS min,+
+ | _timescaledb_functions.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_11.agg_3_3, NULL::numeric) AS max,+
+ | _timescaledb_functions.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_11.agg_4_4, NULL::numeric) AS avg,+
+ | _timescaledb_functions.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_11.agg_5_5, NULL::numeric) AS sum +
+ | FROM _timescaledb_internal._materialized_hypertable_11 +
+ | WHERE (_materialized_hypertable_11.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(11)), '-infinity'::timestamp with time zone)) +
+ | GROUP BY _materialized_hypertable_11.bucket +
| UNION ALL +
| SELECT public.time_bucket('@ 1 day'::interval, conditions."time") AS bucket, +
| min(conditions.temperature) AS min, +
@@ -1577,62 +1862,62 @@ user_view_definition | SELECT _materialized_hypertable_29.bucket,
| avg(conditions.temperature) AS avg, +
| sum(conditions.temperature) AS sum +
| FROM public.conditions +
- | WHERE (conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(29)), '-infinity'::timestamp with time zone)) +
+ | WHERE (conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(11)), '-infinity'::timestamp with time zone)) +
| GROUP BY (public.time_bucket('@ 1 day'::interval, conditions."time"));
\x off
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+-------------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- 29 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
- 29 | 2 | NOT STARTED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
- 29 | 3 | NOT STARTED | DISABLE POLICIES | {"policies": null}
- 29 | 4 | NOT STARTED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp with time zone"}
- 29 | 5 | NOT STARTED | COPY DATA | {"end_ts": "Fri Mar 11 16:00:00 2022 PST", "start_ts": "Fri Dec 31 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 6 | NOT STARTED | COPY DATA | {"end_ts": "Fri May 20 16:00:00 2022 PDT", "start_ts": "Fri Mar 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 7 | NOT STARTED | COPY DATA | {"end_ts": "Fri Jul 29 16:00:00 2022 PDT", "start_ts": "Fri May 20 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 8 | NOT STARTED | COPY DATA | {"end_ts": "Fri Oct 07 16:00:00 2022 PDT", "start_ts": "Fri Jul 29 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 9 | NOT STARTED | COPY DATA | {"end_ts": "Fri Dec 16 16:00:00 2022 PST", "start_ts": "Fri Oct 07 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 10 | NOT STARTED | COPY DATA | {"end_ts": "Fri Feb 24 16:00:00 2023 PST", "start_ts": "Fri Dec 16 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 11 | NOT STARTED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
- 29 | 12 | NOT STARTED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
- 29 | 13 | NOT STARTED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
- 29 | 14 | NOT STARTED | ENABLE POLICIES |
+ 11 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
+ 11 | 2 | NOT STARTED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
+ 11 | 3 | NOT STARTED | DISABLE POLICIES | {"policies": null}
+ 11 | 4 | NOT STARTED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp with time zone"}
+ 11 | 5 | NOT STARTED | COPY DATA | {"end_ts": "Fri Mar 11 16:00:00 2022 PST", "start_ts": "Fri Dec 31 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 6 | NOT STARTED | COPY DATA | {"end_ts": "Fri May 20 16:00:00 2022 PDT", "start_ts": "Fri Mar 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 7 | NOT STARTED | COPY DATA | {"end_ts": "Fri Jul 29 16:00:00 2022 PDT", "start_ts": "Fri May 20 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 8 | NOT STARTED | COPY DATA | {"end_ts": "Fri Oct 07 16:00:00 2022 PDT", "start_ts": "Fri Jul 29 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 9 | NOT STARTED | COPY DATA | {"end_ts": "Fri Dec 16 16:00:00 2022 PST", "start_ts": "Fri Oct 07 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 10 | NOT STARTED | COPY DATA | {"end_ts": "Fri Feb 24 16:00:00 2023 PST", "start_ts": "Fri Dec 16 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 11 | NOT STARTED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
+ 11 | 12 | NOT STARTED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
+ 11 | 13 | NOT STARTED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
+ 11 | 14 | NOT STARTED | ENABLE POLICIES |
(14 rows)
-- should resume the execution
CALL cagg_migrate('conditions_summary_daily');
-psql:include/cagg_migrate_common.sql:169: WARNING: resuming the migration of the continuous aggregate "public.conditions_summary_daily"
-psql:include/cagg_migrate_common.sql:169: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
+psql:include/cagg_migrate_common.sql:98: WARNING: resuming the migration of the continuous aggregate "public.conditions_summary_daily"
+psql:include/cagg_migrate_common.sql:98: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- 29 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
- 29 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
- 29 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
- 29 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp with time zone"}
- 29 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 16:00:00 2022 PST", "start_ts": "Fri Dec 31 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 16:00:00 2022 PDT", "start_ts": "Fri Mar 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 16:00:00 2022 PDT", "start_ts": "Fri May 20 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 16:00:00 2022 PDT", "start_ts": "Fri Jul 29 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 16:00:00 2022 PST", "start_ts": "Fri Oct 07 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 16:00:00 2023 PST", "start_ts": "Fri Dec 16 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
- 29 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
- 29 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
- 29 | 14 | FINISHED | ENABLE POLICIES |
+ 11 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
+ 11 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
+ 11 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
+ 11 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp with time zone"}
+ 11 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 16:00:00 2022 PST", "start_ts": "Fri Dec 31 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 16:00:00 2022 PDT", "start_ts": "Fri Mar 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 16:00:00 2022 PDT", "start_ts": "Fri May 20 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 16:00:00 2022 PDT", "start_ts": "Fri Jul 29 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 16:00:00 2022 PST", "start_ts": "Fri Oct 07 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 16:00:00 2023 PST", "start_ts": "Fri Dec 16 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
+ 11 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
+ 11 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
+ 11 | 14 | FINISHED | ENABLE POLICIES |
(14 rows)
\set ON_ERROR_STOP 0
-- should error because plan already exists
CALL _timescaledb_functions.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new');
-psql:include/cagg_migrate_common.sql:174: ERROR: plan already exists for materialized hypertable 29
+psql:include/cagg_migrate_common.sql:103: ERROR: plan already exists for materialized hypertable 11
CALL cagg_migrate('conditions_summary_daily');
-psql:include/cagg_migrate_common.sql:175: ERROR: plan already exists for continuous aggregate public.conditions_summary_daily
+psql:include/cagg_migrate_common.sql:104: ERROR: plan already exists for continuous aggregate public.conditions_summary_daily
\set ON_ERROR_STOP 1
-- policies for test
ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true);
-psql:include/cagg_migrate_common.sql:179: NOTICE: defaulting compress_orderby to bucket
+psql:include/cagg_migrate_common.sql:108: NOTICE: defaulting compress_orderby to bucket
\if :IS_TIME_DIMENSION
SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval);
add_retention_policy
@@ -1664,19 +1949,19 @@ AND hypertable_name = :'MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------------------------------------------------------------------------+------------+---------------+-----------------------+-----------------------------+------------------------+-------------------------------------------
- 1026 | Compression Policy [1026] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | {"hypertable_id": 29, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_29 | _timescaledb_functions | policy_compression_check
- 1025 | Refresh Continuous Aggregate Policy [1025] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 29} | | | _timescaledb_internal | _materialized_hypertable_29 | _timescaledb_functions | policy_refresh_continuous_aggregate_check
- 1024 | Retention Policy [1024] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 29} | | | _timescaledb_internal | _materialized_hypertable_29 | _timescaledb_functions | policy_retention_check
+ 1026 | Compression Policy [1026] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | {"hypertable_id": 11, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_11 | _timescaledb_functions | policy_compression_check
+ 1025 | Refresh Continuous Aggregate Policy [1025] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 11} | | | _timescaledb_internal | _materialized_hypertable_11 | _timescaledb_functions | policy_refresh_continuous_aggregate_check
+ 1024 | Retention Policy [1024] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 11} | | | _timescaledb_internal | _materialized_hypertable_11 | _timescaledb_functions | policy_retention_check
(3 rows)
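The three jobs listed above are attached to the cagg before the final migration run so that the DISABLE/COPY/ENABLE POLICIES plan steps have something to act on. Reconstructed as a sketch from the job configs (the retention call is the test's own; the other two calls are inferred from the configs and are illustrative, not the test's literal statements):

-- Illustrative policy setup matching the job configs shown above:
ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true);
SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval);
SELECT add_continuous_aggregate_policy('conditions_summary_daily',
       start_offset => '30 days'::interval,
       end_offset => '1 day'::interval,
       schedule_interval => '1 hour'::interval);
SELECT add_compression_policy('conditions_summary_daily', compress_after => '45 days'::interval);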
-- execute the migration
DROP MATERIALIZED VIEW conditions_summary_daily_new;
-psql:include/cagg_migrate_common.sql:198: NOTICE: drop cascades to 6 other objects
+psql:include/cagg_migrate_common.sql:127: NOTICE: drop cascades to 6 other objects
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
-psql:include/cagg_migrate_common.sql:199: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
+psql:include/cagg_migrate_common.sql:128: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
CALL cagg_migrate('conditions_summary_daily');
-psql:include/cagg_migrate_common.sql:200: NOTICE: defaulting compress_orderby to bucket
-psql:include/cagg_migrate_common.sql:200: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
+psql:include/cagg_migrate_common.sql:129: NOTICE: defaulting compress_orderby to bucket
+psql:include/cagg_migrate_common.sql:129: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
SELECT
 ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID",
 h.schema_name AS "NEW_MAT_SCHEMA_NAME",
@@ -1701,13 +1986,13 @@ WHERE
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
- SELECT _materialized_hypertable_33.bucket,
- _materialized_hypertable_33.min,
- _materialized_hypertable_33.max,
- _materialized_hypertable_33.avg,
- _materialized_hypertable_33.sum
- FROM _timescaledb_internal._materialized_hypertable_33
- WHERE _materialized_hypertable_33.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(33)), '-infinity'::timestamp with time zone)
+ SELECT _materialized_hypertable_20.bucket,
+ _materialized_hypertable_20.min,
+ _materialized_hypertable_20.max,
+ _materialized_hypertable_20.avg,
+ _materialized_hypertable_20.sum
+ FROM _timescaledb_internal._materialized_hypertable_20
+ WHERE _materialized_hypertable_20.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(20)), '-infinity'::timestamp with time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
@@ -1715,7 +2000,7 @@ UNION ALL
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
- WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(33)), '-infinity'::timestamp with time zone)
+ WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(20)), '-infinity'::timestamp with time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
SELECT *
@@ -1725,28 +2010,28 @@ AND hypertable_name = :'NEW_MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------------------------------------------------------------------------+------------+---------------+-----------------------+-----------------------------+------------------------+-------------------------------------------
- 1029 | Compression Policy [1029] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | {"hypertable_id": 33, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_33 | _timescaledb_functions | policy_compression_check
- 1028 | Refresh Continuous Aggregate Policy [1028] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 33} | | | _timescaledb_internal | _materialized_hypertable_33 | _timescaledb_functions | policy_refresh_continuous_aggregate_check
- 1027 | Retention Policy [1027] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 33} | | | _timescaledb_internal | _materialized_hypertable_33 | _timescaledb_functions | policy_retention_check
+ 1029 | Compression Policy [1029] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | {"hypertable_id": 20, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_20 | _timescaledb_functions | policy_compression_check
+ 1028 | Refresh Continuous Aggregate Policy [1028] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 20} | | | _timescaledb_internal | _materialized_hypertable_20 | _timescaledb_functions | policy_refresh_continuous_aggregate_check
+ 1027 | Retention Policy [1027] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 20} | | | _timescaledb_internal | _materialized_hypertable_20 | _timescaledb_functions | policy_retention_check
(3 rows)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- 29 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
- 29 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
- 29 | 3 | FINISHED | DISABLE POLICIES | {"policies": [1026, 1024]}
- 29 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp with time zone"}
- 29 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 16:00:00 2022 PST", "start_ts": "Fri Dec 31 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 16:00:00 2022 PDT", "start_ts": "Fri Mar 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 16:00:00 2022 PDT", "start_ts": "Fri May 20 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 16:00:00 2022 PDT", "start_ts": "Fri Jul 29 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 16:00:00 2022 PST", "start_ts": "Fri Oct 07 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 16:00:00 2023 PST", "start_ts": "Fri Dec 16 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
- 29 | 11 | FINISHED | COPY POLICIES | {"policies": [1026, 1025, 1024], "cagg_name_new": "conditions_summary_daily_new"}
- 29 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
- 29 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
- 29 | 14 | FINISHED | ENABLE POLICIES | {"policies": [1027, 1028, 1029, 1026, 1025, 1024]}
+ 11 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
+ 11 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
+ 11 | 3 | FINISHED | DISABLE POLICIES | {"policies": [1026, 1024]}
+ 11 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp with time zone"}
+ 11 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 16:00:00 2022 PST", "start_ts": "Fri Dec 31 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 16:00:00 2022 PDT", "start_ts": "Fri Mar 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 16:00:00 2022 PDT", "start_ts": "Fri May 20 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 16:00:00 2022 PDT", "start_ts": "Fri Jul 29 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 16:00:00 2022 PST", "start_ts": "Fri Oct 07 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 16:00:00 2023 PST", "start_ts": "Fri Dec 16 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
+ 11 | 11 | FINISHED | COPY POLICIES | {"policies": [1026, 1025, 1024], "cagg_name_new": "conditions_summary_daily_new"}
+ 11 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
+ 11 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
+ 11 | 14 | FINISHED | ENABLE POLICIES | {"policies": [1027, 1028, 1029, 1026, 1025, 1024]}
(14 rows)
-- check migrated data. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
@@ -1761,23 +2046,23 @@ SELECT * FROM conditions_summary_daily_new;
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily') c ORDER BY c::regclass::text;
compress_chunk
-------------------------------------------
- _timescaledb_internal._hyper_29_344_chunk
- _timescaledb_internal._hyper_29_345_chunk
- _timescaledb_internal._hyper_29_346_chunk
- _timescaledb_internal._hyper_29_347_chunk
- _timescaledb_internal._hyper_29_348_chunk
- _timescaledb_internal._hyper_29_349_chunk
+ _timescaledb_internal._hyper_11_344_chunk
+ _timescaledb_internal._hyper_11_345_chunk
+ _timescaledb_internal._hyper_11_346_chunk
+ _timescaledb_internal._hyper_11_347_chunk
+ _timescaledb_internal._hyper_11_348_chunk
+ _timescaledb_internal._hyper_11_349_chunk
(6 rows)
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily_new') c ORDER BY c::regclass::text;
compress_chunk
-------------------------------------------
- _timescaledb_internal._hyper_33_362_chunk
- _timescaledb_internal._hyper_33_363_chunk
- _timescaledb_internal._hyper_33_364_chunk
- _timescaledb_internal._hyper_33_365_chunk
- _timescaledb_internal._hyper_33_366_chunk
- _timescaledb_internal._hyper_33_367_chunk
+ _timescaledb_internal._hyper_20_362_chunk
+ _timescaledb_internal._hyper_20_363_chunk
+ _timescaledb_internal._hyper_20_364_chunk
+ _timescaledb_internal._hyper_20_365_chunk
+ _timescaledb_internal._hyper_20_366_chunk
+ _timescaledb_internal._hyper_20_367_chunk
(6 rows)
-- check migrated data after compression.
should return 0 (zero) rows @@ -1795,21 +2080,21 @@ JOIN _timescaledb_catalog.continuous_agg ON mat_hypertable_id = hypertable_id ORDER BY bgw_job.id; -- test migration overriding the new cagg and keeping the old DROP MATERIALIZED VIEW conditions_summary_daily_new; -psql:include/cagg_migrate_common.sql:248: NOTICE: drop cascades to 6 other objects +psql:include/cagg_migrate_common.sql:177: NOTICE: drop cascades to 6 other objects TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE; -psql:include/cagg_migrate_common.sql:249: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" +psql:include/cagg_migrate_common.sql:178: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" -- check policies before the migration SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily'; schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone --------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+-------------------------------------------+---------- - public | conditions_summary_daily | 1024 | Retention Policy [1024] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 29 | {"drop_after": "@ 30 days", "hypertable_id": 29} | _timescaledb_functions | policy_retention_check | - public | conditions_summary_daily | 1025 | Refresh Continuous Aggregate Policy [1025] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 29 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 29} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | - public | conditions_summary_daily | 1026 | Compression Policy [1026] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 29 | {"hypertable_id": 29, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | + public | conditions_summary_daily | 1024 | Retention Policy [1024] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 11 | {"drop_after": "@ 30 days", "hypertable_id": 11} | _timescaledb_functions | policy_retention_check | + public | conditions_summary_daily | 1025 | Refresh Continuous Aggregate Policy [1025] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 11 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 11} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | + public | conditions_summary_daily | 1026 | Compression Policy [1026] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 11 | {"hypertable_id": 11, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | (3 rows) CALL cagg_migrate('conditions_summary_daily', override 
=> TRUE); -psql:include/cagg_migrate_common.sql:252: NOTICE: defaulting compress_orderby to bucket -psql:include/cagg_migrate_common.sql:252: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);" +psql:include/cagg_migrate_common.sql:181: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:181: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);" -- cagg with the new format because it was overridden \d+ conditions_summary_daily View "public.conditions_summary_daily" @@ -1821,13 +2106,13 @@ psql:include/cagg_migrate_common.sql:252: WARNING: refresh the continuous aggre avg | numeric | | | | main | sum | numeric | | | | main | View definition: - SELECT _materialized_hypertable_35.bucket, - _materialized_hypertable_35.min, - _materialized_hypertable_35.max, - _materialized_hypertable_35.avg, - _materialized_hypertable_35.sum - FROM _timescaledb_internal._materialized_hypertable_35 - WHERE _materialized_hypertable_35.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(35)), '-infinity'::timestamp with time zone) + SELECT _materialized_hypertable_22.bucket, + _materialized_hypertable_22.min, + _materialized_hypertable_22.max, + _materialized_hypertable_22.avg, + _materialized_hypertable_22.sum + FROM _timescaledb_internal._materialized_hypertable_22 + WHERE _materialized_hypertable_22.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(22)), '-infinity'::timestamp with time zone) UNION ALL SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket, min(conditions.temperature) AS min, @@ -1835,7 +2120,7 @@ UNION ALL avg(conditions.temperature) AS avg, sum(conditions.temperature) AS sum FROM conditions - WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(35)), '-infinity'::timestamp with time zone) + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(22)), '-infinity'::timestamp with time zone) GROUP BY (time_bucket('@ 1 day'::interval, conditions."time")); -- cagg with the old format because it was overridden @@ -1849,14 +2134,14 @@ UNION ALL avg | numeric | | | | main | sum | numeric | | | | main | View definition: - SELECT _materialized_hypertable_29.bucket, - _timescaledb_functions.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_29.agg_2_2, NULL::numeric) AS min, - _timescaledb_functions.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_29.agg_3_3, NULL::numeric) AS max, - _timescaledb_functions.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_29.agg_4_4, NULL::numeric) AS avg, - _timescaledb_functions.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_29.agg_5_5, NULL::numeric) AS sum - FROM _timescaledb_internal._materialized_hypertable_29 - WHERE _materialized_hypertable_29.bucket < 
COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(29)), '-infinity'::timestamp with time zone) - GROUP BY _materialized_hypertable_29.bucket + SELECT _materialized_hypertable_11.bucket, + _timescaledb_functions.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_11.agg_2_2, NULL::numeric) AS min, + _timescaledb_functions.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_11.agg_3_3, NULL::numeric) AS max, + _timescaledb_functions.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_11.agg_4_4, NULL::numeric) AS avg, + _timescaledb_functions.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_11.agg_5_5, NULL::numeric) AS sum + FROM _timescaledb_internal._materialized_hypertable_11 + WHERE _materialized_hypertable_11.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(11)), '-infinity'::timestamp with time zone) + GROUP BY _materialized_hypertable_11.bucket UNION ALL SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket, min(conditions.temperature) AS min, @@ -1864,30 +2149,30 @@ UNION ALL avg(conditions.temperature) AS avg, sum(conditions.temperature) AS sum FROM conditions - WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(29)), '-infinity'::timestamp with time zone) + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(11)), '-infinity'::timestamp with time zone) GROUP BY (time_bucket('@ 1 day'::interval, conditions."time")); \set ON_ERROR_STOP 0 -- should fail because the cagg was overridden SELECT * FROM conditions_summary_daily_new; -psql:include/cagg_migrate_common.sql:259: ERROR: relation "conditions_summary_daily_new" does not exist at character 15 +psql:include/cagg_migrate_common.sql:188: ERROR: relation "conditions_summary_daily_new" does not exist at character 15 \set ON_ERROR_STOP 1 -- check policies after the migration SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily'; schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone --------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+-------------------------------------------+---------- - public | conditions_summary_daily | 1030 | Retention Policy [1030] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 35 | {"drop_after": "@ 30 days", "hypertable_id": 35} | _timescaledb_functions | policy_retention_check | - public | conditions_summary_daily | 1031 | Refresh Continuous Aggregate Policy [1031] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | 
cluster_super_user | t | f | | 35 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 35} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | - public | conditions_summary_daily | 1032 | Compression Policy [1032] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 35 | {"hypertable_id": 35, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | + public | conditions_summary_daily | 1030 | Retention Policy [1030] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 22 | {"drop_after": "@ 30 days", "hypertable_id": 22} | _timescaledb_functions | policy_retention_check | + public | conditions_summary_daily | 1031 | Refresh Continuous Aggregate Policy [1031] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 22 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 22} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | + public | conditions_summary_daily | 1032 | Compression Policy [1032] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 22 | {"hypertable_id": 22, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | (3 rows) -- should return the old cagg jobs SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_old'; schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone --------+------------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+-------------------------------------------+---------- - public | conditions_summary_daily_old | 1024 | Retention Policy [1024] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 29 | {"drop_after": "@ 30 days", "hypertable_id": 29} | _timescaledb_functions | policy_retention_check | - public | conditions_summary_daily_old | 1025 | Refresh Continuous Aggregate Policy [1025] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 29 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 29} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | - public | conditions_summary_daily_old | 1026 | Compression Policy [1026] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 29 | {"hypertable_id": 29, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | + public | conditions_summary_daily_old | 1024 | Retention Policy [1024] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 11 | {"drop_after": "@ 30 days", "hypertable_id": 11} | _timescaledb_functions | policy_retention_check | + 
public | conditions_summary_daily_old | 1025 | Refresh Continuous Aggregate Policy [1025] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 11 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 11} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | + public | conditions_summary_daily_old | 1026 | Compression Policy [1026] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 11 | {"hypertable_id": 11, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | (3 rows) -- should return no rows because the cagg was overwritten @@ -1898,26 +2183,26 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d -- test migration overriding the new cagg and removing the old TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE; -psql:include/cagg_migrate_common.sql:269: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" +psql:include/cagg_migrate_common.sql:198: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" DROP MATERIALIZED VIEW conditions_summary_daily; -psql:include/cagg_migrate_common.sql:270: NOTICE: drop cascades to 6 other objects +psql:include/cagg_migrate_common.sql:199: NOTICE: drop cascades to 6 other objects ALTER MATERIALIZED VIEW conditions_summary_daily_old RENAME TO conditions_summary_daily; -- check policies before the migration SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily'; schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone --------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+-------------------------------------------+---------- - public | conditions_summary_daily | 1024 | Retention Policy [1024] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 29 | {"drop_after": "@ 30 days", "hypertable_id": 29} | _timescaledb_functions | policy_retention_check | - public | conditions_summary_daily | 1025 | Refresh Continuous Aggregate Policy [1025] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 29 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 29} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | - public | conditions_summary_daily | 1026 | Compression Policy [1026] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 29 | {"hypertable_id": 29, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | + public | conditions_summary_daily | 1024 | Retention Policy [1024] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 11 | {"drop_after": "@ 30 days", "hypertable_id": 11} | 
_timescaledb_functions | policy_retention_check | + public | conditions_summary_daily | 1025 | Refresh Continuous Aggregate Policy [1025] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 11 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 11} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | + public | conditions_summary_daily | 1026 | Compression Policy [1026] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 11 | {"hypertable_id": 11, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | (3 rows) CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE); -psql:include/cagg_migrate_common.sql:274: NOTICE: defaulting compress_orderby to bucket -psql:include/cagg_migrate_common.sql:274: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);" -psql:include/cagg_migrate_common.sql:274: NOTICE: drop cascades to 6 other objects -psql:include/cagg_migrate_common.sql:274: NOTICE: job 1026 not found, skipping -psql:include/cagg_migrate_common.sql:274: NOTICE: job 1025 not found, skipping -psql:include/cagg_migrate_common.sql:274: NOTICE: job 1024 not found, skipping +psql:include/cagg_migrate_common.sql:203: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:203: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);" +psql:include/cagg_migrate_common.sql:203: NOTICE: drop cascades to 6 other objects +psql:include/cagg_migrate_common.sql:203: NOTICE: job 1026 not found, skipping +psql:include/cagg_migrate_common.sql:203: NOTICE: job 1025 not found, skipping +psql:include/cagg_migrate_common.sql:203: NOTICE: job 1024 not found, skipping -- cagg with the new format because it was overridden \d+ conditions_summary_daily View "public.conditions_summary_daily" @@ -1929,13 +2214,13 @@ psql:include/cagg_migrate_common.sql:274: NOTICE: job 1024 not found, skipping avg | numeric | | | | main | sum | numeric | | | | main | View definition: - SELECT _materialized_hypertable_37.bucket, - _materialized_hypertable_37.min, - _materialized_hypertable_37.max, - _materialized_hypertable_37.avg, - _materialized_hypertable_37.sum - FROM _timescaledb_internal._materialized_hypertable_37 - WHERE _materialized_hypertable_37.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(37)), '-infinity'::timestamp with time zone) + SELECT _materialized_hypertable_24.bucket, + _materialized_hypertable_24.min, + _materialized_hypertable_24.max, + _materialized_hypertable_24.avg, + _materialized_hypertable_24.sum + FROM _timescaledb_internal._materialized_hypertable_24 + WHERE _materialized_hypertable_24.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(24)), '-infinity'::timestamp with time zone) UNION ALL SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket, min(conditions.temperature) AS min, @@ -1943,24 +2228,24 @@ UNION ALL avg(conditions.temperature) AS avg, sum(conditions.temperature) AS sum FROM 
conditions - WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(37)), '-infinity'::timestamp with time zone) + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(24)), '-infinity'::timestamp with time zone) GROUP BY (time_bucket('@ 1 day'::interval, conditions."time")); \set ON_ERROR_STOP 0 -- should fail because the cagg was overridden SELECT * FROM conditions_summary_daily_new; -psql:include/cagg_migrate_common.sql:279: ERROR: relation "conditions_summary_daily_new" does not exist at character 15 +psql:include/cagg_migrate_common.sql:208: ERROR: relation "conditions_summary_daily_new" does not exist at character 15 -- should fail because the old cagg was removed SELECT * FROM conditions_summary_daily_old; -psql:include/cagg_migrate_common.sql:281: ERROR: relation "conditions_summary_daily_old" does not exist at character 15 +psql:include/cagg_migrate_common.sql:210: ERROR: relation "conditions_summary_daily_old" does not exist at character 15 \set ON_ERROR_STOP 1 -- check policies after the migration SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily'; schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone --------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+-------------------------------------------+---------- - public | conditions_summary_daily | 1033 | Retention Policy [1033] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 37 | {"drop_after": "@ 30 days", "hypertable_id": 37} | _timescaledb_functions | policy_retention_check | - public | conditions_summary_daily | 1034 | Refresh Continuous Aggregate Policy [1034] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 37 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 37} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | - public | conditions_summary_daily | 1035 | Compression Policy [1035] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 37 | {"hypertable_id": 37, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | + public | conditions_summary_daily | 1033 | Retention Policy [1033] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 24 | {"drop_after": "@ 30 days", "hypertable_id": 24} | _timescaledb_functions | policy_retention_check | + public | conditions_summary_daily | 1034 | Refresh Continuous Aggregate Policy [1034] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 24 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 24} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | + public 
| conditions_summary_daily | 1035 | Compression Policy [1035] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 24 | {"hypertable_id": 24, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check | (3 rows) -- should return no rows because the old cagg was removed @@ -1977,14 +2262,14 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d -- permission tests TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE; -psql:include/cagg_migrate_common.sql:291: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" +psql:include/cagg_migrate_common.sql:220: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" GRANT ALL ON TABLE conditions TO :ROLE_DEFAULT_PERM_USER; ALTER MATERIALIZED VIEW conditions_summary_weekly OWNER TO :ROLE_DEFAULT_PERM_USER; SET ROLE :ROLE_DEFAULT_PERM_USER; \set ON_ERROR_STOP 0 -- should fail because of the lack of permissions on 'continuous_agg_migrate_plan' catalog table CALL cagg_migrate('conditions_summary_weekly'); -psql:include/cagg_migrate_common.sql:298: ERROR: permission denied for table continuous_agg_migrate_plan +psql:include/cagg_migrate_common.sql:227: ERROR: permission denied for table continuous_agg_migrate_plan \set ON_ERROR_STOP 1 RESET ROLE; GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO :ROLE_DEFAULT_PERM_USER; @@ -1992,7 +2277,7 @@ SET ROLE :ROLE_DEFAULT_PERM_USER; \set ON_ERROR_STOP 0 -- should fail because of the lack of permissions on 'continuous_agg_migrate_plan_step' catalog table CALL cagg_migrate('conditions_summary_weekly'); -psql:include/cagg_migrate_common.sql:308: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq +psql:include/cagg_migrate_common.sql:237: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq \set ON_ERROR_STOP 1 RESET ROLE; GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO :ROLE_DEFAULT_PERM_USER; @@ -2000,14 +2285,14 @@ SET ROLE :ROLE_DEFAULT_PERM_USER; \set ON_ERROR_STOP 0 -- should fail because of the lack of permissions on 'continuous_agg_migrate_plan_step_step_id_seq' catalog sequence CALL cagg_migrate('conditions_summary_weekly'); -psql:include/cagg_migrate_common.sql:318: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq +psql:include/cagg_migrate_common.sql:247: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq \set ON_ERROR_STOP 1 RESET ROLE; GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO :ROLE_DEFAULT_PERM_USER; SET ROLE :ROLE_DEFAULT_PERM_USER; -- all necessary permissions granted CALL cagg_migrate('conditions_summary_weekly'); -psql:include/cagg_migrate_common.sql:327: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_weekly_new', CAST('Mon Jan 02 00:00:00 2023' AS timestamp with time zone), NULL);" +psql:include/cagg_migrate_common.sql:256: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_weekly_new', CAST('Mon Jan 02 00:00:00 2023' AS timestamp with time zone), NULL);" -- check migrated data. 
should return 0 (zero) rows SELECT * FROM conditions_summary_weekly EXCEPT @@ -2019,20 +2304,20 @@ SELECT * FROM conditions_summary_weekly_new; SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id; mat_hypertable_id | step_id | status | type | config -------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 30 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Mon Jan 02 00:00:00 2023"} - 30 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_weekly_new"} - 30 | 3 | FINISHED | DISABLE POLICIES | {"policies": null} - 30 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Mon Jan 02 00:00:00 2023", "cagg_name_new": "conditions_summary_weekly_new", "window_start_type": "timestamp with time zone"} - 30 | 5 | FINISHED | COPY DATA | {"end_ts": "Sun Mar 06 16:00:00 2022 PST", "start_ts": "Sun Dec 26 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"} - 30 | 6 | FINISHED | COPY DATA | {"end_ts": "Sun May 15 16:00:00 2022 PDT", "start_ts": "Sun Mar 06 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"} - 30 | 7 | FINISHED | COPY DATA | {"end_ts": "Sun Jul 24 16:00:00 2022 PDT", "start_ts": "Sun May 15 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"} - 30 | 8 | FINISHED | COPY DATA | {"end_ts": "Sun Oct 02 16:00:00 2022 PDT", "start_ts": "Sun Jul 24 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"} - 30 | 9 | FINISHED | COPY DATA | {"end_ts": "Sun Dec 11 16:00:00 2022 PST", "start_ts": "Sun Oct 02 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"} - 30 | 10 | FINISHED | COPY DATA | {"end_ts": "Sun Feb 19 16:00:00 2023 PST", "start_ts": "Sun Dec 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"} - 30 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_weekly_new"} - 30 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_weekly_new"} - 30 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_weekly_new"} - 30 | 14 | FINISHED | ENABLE POLICIES | + 12 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Mon Jan 02 00:00:00 2023"} + 12 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_weekly_new"} + 12 | 3 | FINISHED | DISABLE POLICIES | {"policies": null} + 12 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Mon Jan 02 00:00:00 2023", "cagg_name_new": "conditions_summary_weekly_new", "window_start_type": "timestamp with time zone"} + 12 | 5 | FINISHED | COPY DATA | {"end_ts": "Sun Mar 06 16:00:00 2022 PST", "start_ts": "Sun Dec 26 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_weekly_new", 
"bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"} + 12 | 6 | FINISHED | COPY DATA | {"end_ts": "Sun May 15 16:00:00 2022 PDT", "start_ts": "Sun Mar 06 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"} + 12 | 7 | FINISHED | COPY DATA | {"end_ts": "Sun Jul 24 16:00:00 2022 PDT", "start_ts": "Sun May 15 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"} + 12 | 8 | FINISHED | COPY DATA | {"end_ts": "Sun Oct 02 16:00:00 2022 PDT", "start_ts": "Sun Jul 24 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"} + 12 | 9 | FINISHED | COPY DATA | {"end_ts": "Sun Dec 11 16:00:00 2022 PST", "start_ts": "Sun Oct 02 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"} + 12 | 10 | FINISHED | COPY DATA | {"end_ts": "Sun Feb 19 16:00:00 2023 PST", "start_ts": "Sun Dec 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_weekly_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"} + 12 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_weekly_new"} + 12 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_weekly_new"} + 12 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_weekly_new"} + 12 | 14 | FINISHED | ENABLE POLICIES | (14 rows) RESET ROLE; @@ -2043,14 +2328,14 @@ RESET ROLE; -- execute transaction control statements. 
Transaction control statements are only -- allowed if CALL is executed in its own transaction.` TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE; -psql:include/cagg_migrate_common.sql:344: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" +psql:include/cagg_migrate_common.sql:273: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" DROP MATERIALIZED VIEW conditions_summary_weekly_new; -psql:include/cagg_migrate_common.sql:345: NOTICE: drop cascades to 6 other objects +psql:include/cagg_migrate_common.sql:274: NOTICE: drop cascades to 6 other objects \set ON_ERROR_STOP 0 BEGIN; -- should fail with `invalid transaction termination` CALL cagg_migrate('conditions_summary_weekly'); -psql:include/cagg_migrate_common.sql:350: ERROR: invalid transaction termination +psql:include/cagg_migrate_common.sql:279: ERROR: invalid transaction termination ROLLBACK; \set ON_ERROR_STOP 1 CREATE FUNCTION execute_migration() RETURNS void AS @@ -2066,7 +2351,7 @@ LANGUAGE plpgsql; BEGIN; -- should fail with `invalid transaction termination` SELECT execute_migration(); -psql:include/cagg_migrate_common.sql:367: ERROR: invalid transaction termination +psql:include/cagg_migrate_common.sql:296: ERROR: invalid transaction termination ROLLBACK; \set ON_ERROR_STOP 1 -- cleanup @@ -2074,9 +2359,15 @@ DROP FUNCTION execute_migration(); REVOKE SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan FROM :ROLE_DEFAULT_PERM_USER; REVOKE USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq FROM :ROLE_DEFAULT_PERM_USER; TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE; -psql:include/cagg_migrate_common.sql:375: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" +psql:include/cagg_migrate_common.sql:304: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" DROP MATERIALIZED VIEW conditions_summary_daily; -psql:include/cagg_migrate_common.sql:376: NOTICE: drop cascades to 6 other objects +psql:include/cagg_migrate_common.sql:305: NOTICE: drop cascades to 6 other objects DROP MATERIALIZED VIEW conditions_summary_weekly; -psql:include/cagg_migrate_common.sql:377: NOTICE: drop cascades to 6 other objects +psql:include/cagg_migrate_common.sql:306: NOTICE: drop cascades to 6 other objects DROP TABLE conditions; +SELECT _timescaledb_functions.start_background_workers(); + start_background_workers +-------------------------- + t +(1 row) + diff --git a/tsl/test/expected/cagg_repair-13.out b/tsl/test/expected/cagg_repair-13.out index 6143f9ef96d..ad731110b5a 100644 --- a/tsl/test/expected/cagg_repair-13.out +++ b/tsl/test/expected/cagg_repair-13.out @@ -311,45 +311,4 @@ UNION ALL WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone) GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")); --- Tests with old cagg format -CREATE MATERIALIZED VIEW conditions_summary_old_format -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket(INTERVAL '1 week', "time") AS bucket, - MIN(temperature), - MAX(temperature), - SUM(temperature) -FROM - conditions -GROUP BY - 1 -WITH NO DATA; --- Should rebuild without forcing -CALL _timescaledb_internal.cagg_try_repair('conditions_summary_old_format', FALSE); -DEBUG: [cagg_rebuild_view_definition] public.conditions_summary_old_format has 
been rebuilt! -\d+ conditions_summary_old_format - View "public.conditions_summary_old_format" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+--------------------------+-----------+----------+---------+---------+------------- - bucket | timestamp with time zone | | | | plain | - min | integer | | | | plain | - max | integer | | | | plain | - sum | bigint | | | | plain | -View definition: - SELECT _materialized_hypertable_4.bucket, - _timescaledb_functions.finalize_agg('pg_catalog.min(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_2_2, NULL::integer) AS min, - _timescaledb_functions.finalize_agg('pg_catalog.max(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_3_3, NULL::integer) AS max, - _timescaledb_functions.finalize_agg('pg_catalog.sum(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_4_4, NULL::bigint) AS sum - FROM _timescaledb_internal._materialized_hypertable_4 - WHERE _materialized_hypertable_4.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone) - GROUP BY _materialized_hypertable_4.bucket -UNION ALL - SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, - min(conditions.temperature) AS min, - max(conditions.temperature) AS max, - sum(conditions.temperature) AS sum - FROM conditions - WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone) - GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")); - DROP PROCEDURE _timescaledb_internal.cagg_try_repair (REGCLASS, BOOLEAN); diff --git a/tsl/test/expected/cagg_repair-14.out b/tsl/test/expected/cagg_repair-14.out index 6143f9ef96d..ad731110b5a 100644 --- a/tsl/test/expected/cagg_repair-14.out +++ b/tsl/test/expected/cagg_repair-14.out @@ -311,45 +311,4 @@ UNION ALL WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone) GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")); --- Tests with old cagg format -CREATE MATERIALIZED VIEW conditions_summary_old_format -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket(INTERVAL '1 week', "time") AS bucket, - MIN(temperature), - MAX(temperature), - SUM(temperature) -FROM - conditions -GROUP BY - 1 -WITH NO DATA; --- Should rebuild without forcing -CALL _timescaledb_internal.cagg_try_repair('conditions_summary_old_format', FALSE); -DEBUG: [cagg_rebuild_view_definition] public.conditions_summary_old_format has been rebuilt! 
-\d+ conditions_summary_old_format - View "public.conditions_summary_old_format" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+--------------------------+-----------+----------+---------+---------+------------- - bucket | timestamp with time zone | | | | plain | - min | integer | | | | plain | - max | integer | | | | plain | - sum | bigint | | | | plain | -View definition: - SELECT _materialized_hypertable_4.bucket, - _timescaledb_functions.finalize_agg('pg_catalog.min(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_2_2, NULL::integer) AS min, - _timescaledb_functions.finalize_agg('pg_catalog.max(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_3_3, NULL::integer) AS max, - _timescaledb_functions.finalize_agg('pg_catalog.sum(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_4_4, NULL::bigint) AS sum - FROM _timescaledb_internal._materialized_hypertable_4 - WHERE _materialized_hypertable_4.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone) - GROUP BY _materialized_hypertable_4.bucket -UNION ALL - SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, - min(conditions.temperature) AS min, - max(conditions.temperature) AS max, - sum(conditions.temperature) AS sum - FROM conditions - WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone) - GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")); - DROP PROCEDURE _timescaledb_internal.cagg_try_repair (REGCLASS, BOOLEAN); diff --git a/tsl/test/expected/cagg_repair-15.out b/tsl/test/expected/cagg_repair-15.out index 6143f9ef96d..ad731110b5a 100644 --- a/tsl/test/expected/cagg_repair-15.out +++ b/tsl/test/expected/cagg_repair-15.out @@ -311,45 +311,4 @@ UNION ALL WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone) GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")); --- Tests with old cagg format -CREATE MATERIALIZED VIEW conditions_summary_old_format -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket(INTERVAL '1 week', "time") AS bucket, - MIN(temperature), - MAX(temperature), - SUM(temperature) -FROM - conditions -GROUP BY - 1 -WITH NO DATA; --- Should rebuild without forcing -CALL _timescaledb_internal.cagg_try_repair('conditions_summary_old_format', FALSE); -DEBUG: [cagg_rebuild_view_definition] public.conditions_summary_old_format has been rebuilt! 
-\d+ conditions_summary_old_format - View "public.conditions_summary_old_format" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+--------------------------+-----------+----------+---------+---------+------------- - bucket | timestamp with time zone | | | | plain | - min | integer | | | | plain | - max | integer | | | | plain | - sum | bigint | | | | plain | -View definition: - SELECT _materialized_hypertable_4.bucket, - _timescaledb_functions.finalize_agg('pg_catalog.min(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_2_2, NULL::integer) AS min, - _timescaledb_functions.finalize_agg('pg_catalog.max(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_3_3, NULL::integer) AS max, - _timescaledb_functions.finalize_agg('pg_catalog.sum(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_4_4, NULL::bigint) AS sum - FROM _timescaledb_internal._materialized_hypertable_4 - WHERE _materialized_hypertable_4.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone) - GROUP BY _materialized_hypertable_4.bucket -UNION ALL - SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, - min(conditions.temperature) AS min, - max(conditions.temperature) AS max, - sum(conditions.temperature) AS sum - FROM conditions - WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone) - GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")); - DROP PROCEDURE _timescaledb_internal.cagg_try_repair (REGCLASS, BOOLEAN); diff --git a/tsl/test/expected/cagg_repair-16.out b/tsl/test/expected/cagg_repair-16.out index bd9c628773b..ffb4db7beb1 100644 --- a/tsl/test/expected/cagg_repair-16.out +++ b/tsl/test/expected/cagg_repair-16.out @@ -311,45 +311,4 @@ UNION ALL WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone) GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")); --- Tests with old cagg format -CREATE MATERIALIZED VIEW conditions_summary_old_format -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket(INTERVAL '1 week', "time") AS bucket, - MIN(temperature), - MAX(temperature), - SUM(temperature) -FROM - conditions -GROUP BY - 1 -WITH NO DATA; --- Should rebuild without forcing -CALL _timescaledb_internal.cagg_try_repair('conditions_summary_old_format', FALSE); -DEBUG: [cagg_rebuild_view_definition] public.conditions_summary_old_format has been rebuilt! 
-\d+ conditions_summary_old_format - View "public.conditions_summary_old_format" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+--------------------------+-----------+----------+---------+---------+------------- - bucket | timestamp with time zone | | | | plain | - min | integer | | | | plain | - max | integer | | | | plain | - sum | bigint | | | | plain | -View definition: - SELECT _materialized_hypertable_4.bucket, - _timescaledb_functions.finalize_agg('pg_catalog.min(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_2_2, NULL::integer) AS min, - _timescaledb_functions.finalize_agg('pg_catalog.max(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_3_3, NULL::integer) AS max, - _timescaledb_functions.finalize_agg('pg_catalog.sum(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_4_4, NULL::bigint) AS sum - FROM _timescaledb_internal._materialized_hypertable_4 - WHERE _materialized_hypertable_4.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone) - GROUP BY _materialized_hypertable_4.bucket -UNION ALL - SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, - min(conditions.temperature) AS min, - max(conditions.temperature) AS max, - sum(conditions.temperature) AS sum - FROM conditions - WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone) - GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")); - DROP PROCEDURE _timescaledb_internal.cagg_try_repair (REGCLASS, BOOLEAN); diff --git a/tsl/test/expected/continuous_aggs-13.out b/tsl/test/expected/continuous_aggs-13.out index 2609b152a9d..8bd346f2aaf 100644 --- a/tsl/test/expected/continuous_aggs-13.out +++ b/tsl/test/expected/continuous_aggs-13.out @@ -2213,16 +2213,10 @@ AS SELECT time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) FROM conditions GROUP BY time_bucket('1day', timec) WITH NO DATA; -CREATE MATERIALIZED VIEW conditions_summary_old(timec, minl, sumt, sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -AS -SELECT time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) -FROM conditions -GROUP BY time_bucket('1day', timec) WITH NO DATA; \x ON SELECT * FROM timescaledb_information.continuous_aggregates -WHERE view_name IN ('conditions_summary_new', 'conditions_summary_old'); +WHERE view_name = 'conditions_summary_new'; -[ RECORD 1 ]---------------------+--------------------------------------------------------------------- hypertable_schema | public hypertable_name | conditions @@ -2240,52 +2234,18 @@ view_definition | SELECT time_bucket('@ 1 day'::interval, con | FROM conditions + | GROUP BY (time_bucket('@ 1 day'::interval, conditions.timec)); finalized | t --[ RECORD 2 ]---------------------+--------------------------------------------------------------------- -hypertable_schema | public -hypertable_name | conditions -view_schema | public -view_name | conditions_summary_old -view_owner | default_perm_user -materialized_only | t -compression_enabled | f -materialization_hypertable_schema | _timescaledb_internal -materialization_hypertable_name | _materialized_hypertable_62 -view_definition | SELECT time_bucket('@ 1 day'::interval, 
conditions.timec) AS timec,+ - | min(conditions.location) AS minl, + - | sum(conditions.temperature) AS sumt, + - | sum(conditions.humidity) AS sumh + - | FROM conditions + - | GROUP BY (time_bucket('@ 1 day'::interval, conditions.timec)); -finalized | f \x OFF CALL refresh_continuous_aggregate('conditions_summary_new', NULL, NULL); -CALL refresh_continuous_aggregate('conditions_summary_old', NULL, NULL); -- Check and compare number of returned rows -SELECT count(*) FROM conditions_summary_new -UNION -SELECT count(*) FROM conditions_summary_old; +SELECT count(*) FROM conditions_summary_new; count ------- 4 (1 row) --- Should return 4 rows that is the same number of rows above -SELECT * -FROM conditions_summary_new -NATURAL JOIN conditions_summary_old -ORDER BY timec; - timec | minl | sumt | sumh -------------------------------+------+------+------ - Thu Dec 31 16:00:00 2009 PST | SFO | 55 | 45 - Fri Jan 01 16:00:00 2010 PST | NYC | 230 | 190 - Wed Oct 31 17:00:00 2018 PDT | NYC | 45 | 35 - Thu Nov 01 17:00:00 2018 PDT | NYC | 35 | 15 -(4 rows) - -- Parallel planning test for realtime Continuous Aggregate DROP TABLE conditions CASCADE; -NOTICE: drop cascades to 4 other objects NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 2 other objects CREATE TABLE conditions ( @@ -2323,25 +2283,25 @@ EXPLAIN (COSTS OFF, TIMING OFF) SELECT * FROM conditions_daily WHERE time_bucket QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Merge Append - Sort Key: _materialized_hypertable_64.sum DESC + Sort Key: _materialized_hypertable_63.sum DESC -> Sort - Sort Key: _materialized_hypertable_64.sum DESC - -> Custom Scan (ChunkAppend) on _materialized_hypertable_64 + Sort Key: _materialized_hypertable_63.sum DESC + -> Custom Scan (ChunkAppend) on _materialized_hypertable_63 Chunks excluded during startup: 0 - -> Index Scan using _hyper_64_185_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_185_chunk - Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) - -> Index Scan using _hyper_64_189_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_189_chunk - Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) - -> Index Scan using _hyper_64_190_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_190_chunk - Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan using _hyper_63_183_chunk__materialized_hypertable_63_time_bucket_idx on _hyper_63_183_chunk + Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan using _hyper_63_187_chunk__materialized_hypertable_63_time_bucket_idx on _hyper_63_187_chunk + 
Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan using _hyper_63_188_chunk__materialized_hypertable_63_time_bucket_idx on _hyper_63_188_chunk + Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) -> Sort Sort Key: (sum(conditions.temperature)) DESC -> HashAggregate Group Key: time_bucket('@ 1 day'::interval, conditions.timec) -> Custom Scan (ChunkAppend) on conditions Chunks excluded during startup: 26 - -> Index Scan Backward using _hyper_63_184_chunk_conditions_timec_idx on _hyper_63_184_chunk - Index Cond: ((timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (timec >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan Backward using _hyper_62_182_chunk_conditions_timec_idx on _hyper_62_182_chunk + Index Cond: ((timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (timec >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) Filter: (time_bucket('@ 1 day'::interval, timec) >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone) (21 rows) diff --git a/tsl/test/expected/continuous_aggs-14.out b/tsl/test/expected/continuous_aggs-14.out index 89323494a28..e43ea781b98 100644 --- a/tsl/test/expected/continuous_aggs-14.out +++ b/tsl/test/expected/continuous_aggs-14.out @@ -2212,16 +2212,10 @@ AS SELECT time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) FROM conditions GROUP BY time_bucket('1day', timec) WITH NO DATA; -CREATE MATERIALIZED VIEW conditions_summary_old(timec, minl, sumt, sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -AS -SELECT time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) -FROM conditions -GROUP BY time_bucket('1day', timec) WITH NO DATA; \x ON SELECT * FROM timescaledb_information.continuous_aggregates -WHERE view_name IN ('conditions_summary_new', 'conditions_summary_old'); +WHERE view_name = 'conditions_summary_new'; -[ RECORD 1 ]---------------------+--------------------------------------------------------------------- hypertable_schema | public hypertable_name | conditions @@ -2239,52 +2233,18 @@ view_definition | SELECT time_bucket('@ 1 day'::interval, con | FROM conditions + | GROUP BY (time_bucket('@ 1 day'::interval, conditions.timec)); finalized | t --[ RECORD 2 ]---------------------+--------------------------------------------------------------------- -hypertable_schema | public -hypertable_name | conditions -view_schema | public -view_name | conditions_summary_old -view_owner | default_perm_user -materialized_only | t -compression_enabled | f -materialization_hypertable_schema | _timescaledb_internal -materialization_hypertable_name | _materialized_hypertable_62 -view_definition | SELECT time_bucket('@ 1 day'::interval, conditions.timec) AS timec,+ - | min(conditions.location) AS minl, + - | sum(conditions.temperature) AS sumt, + - | sum(conditions.humidity) AS sumh + - | FROM conditions + - | GROUP BY (time_bucket('@ 1 day'::interval, conditions.timec)); -finalized | f 
\x OFF CALL refresh_continuous_aggregate('conditions_summary_new', NULL, NULL); -CALL refresh_continuous_aggregate('conditions_summary_old', NULL, NULL); -- Check and compare number of returned rows -SELECT count(*) FROM conditions_summary_new -UNION -SELECT count(*) FROM conditions_summary_old; +SELECT count(*) FROM conditions_summary_new; count ------- 4 (1 row) --- Should return 4 rows that is the same number of rows above -SELECT * -FROM conditions_summary_new -NATURAL JOIN conditions_summary_old -ORDER BY timec; - timec | minl | sumt | sumh -------------------------------+------+------+------ - Thu Dec 31 16:00:00 2009 PST | SFO | 55 | 45 - Fri Jan 01 16:00:00 2010 PST | NYC | 230 | 190 - Wed Oct 31 17:00:00 2018 PDT | NYC | 45 | 35 - Thu Nov 01 17:00:00 2018 PDT | NYC | 35 | 15 -(4 rows) - -- Parallel planning test for realtime Continuous Aggregate DROP TABLE conditions CASCADE; -NOTICE: drop cascades to 4 other objects NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 2 other objects CREATE TABLE conditions ( @@ -2322,25 +2282,25 @@ EXPLAIN (COSTS OFF, TIMING OFF) SELECT * FROM conditions_daily WHERE time_bucket QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Merge Append - Sort Key: _materialized_hypertable_64.sum DESC + Sort Key: _materialized_hypertable_63.sum DESC -> Sort - Sort Key: _materialized_hypertable_64.sum DESC - -> Custom Scan (ChunkAppend) on _materialized_hypertable_64 + Sort Key: _materialized_hypertable_63.sum DESC + -> Custom Scan (ChunkAppend) on _materialized_hypertable_63 Chunks excluded during startup: 0 - -> Index Scan using _hyper_64_185_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_185_chunk - Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) - -> Index Scan using _hyper_64_189_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_189_chunk - Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) - -> Index Scan using _hyper_64_190_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_190_chunk - Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan using _hyper_63_183_chunk__materialized_hypertable_63_time_bucket_idx on _hyper_63_183_chunk + Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan using _hyper_63_187_chunk__materialized_hypertable_63_time_bucket_idx on _hyper_63_187_chunk + Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan using 
_hyper_63_188_chunk__materialized_hypertable_63_time_bucket_idx on _hyper_63_188_chunk + Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) -> Sort Sort Key: (sum(conditions.temperature)) DESC -> HashAggregate Group Key: time_bucket('@ 1 day'::interval, conditions.timec) -> Custom Scan (ChunkAppend) on conditions Chunks excluded during startup: 26 - -> Index Scan Backward using _hyper_63_184_chunk_conditions_timec_idx on _hyper_63_184_chunk - Index Cond: ((timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (timec >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan Backward using _hyper_62_182_chunk_conditions_timec_idx on _hyper_62_182_chunk + Index Cond: ((timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (timec >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) Filter: (time_bucket('@ 1 day'::interval, timec) >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone) (21 rows) diff --git a/tsl/test/expected/continuous_aggs-15.out b/tsl/test/expected/continuous_aggs-15.out index 953bd51735f..f196a4ad743 100644 --- a/tsl/test/expected/continuous_aggs-15.out +++ b/tsl/test/expected/continuous_aggs-15.out @@ -2214,16 +2214,10 @@ AS SELECT time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) FROM conditions GROUP BY time_bucket('1day', timec) WITH NO DATA; -CREATE MATERIALIZED VIEW conditions_summary_old(timec, minl, sumt, sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -AS -SELECT time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) -FROM conditions -GROUP BY time_bucket('1day', timec) WITH NO DATA; \x ON SELECT * FROM timescaledb_information.continuous_aggregates -WHERE view_name IN ('conditions_summary_new', 'conditions_summary_old'); +WHERE view_name = 'conditions_summary_new'; -[ RECORD 1 ]---------------------+--------------------------------------------------------------------- hypertable_schema | public hypertable_name | conditions @@ -2241,52 +2235,18 @@ view_definition | SELECT time_bucket('@ 1 day'::interval, con | FROM conditions + | GROUP BY (time_bucket('@ 1 day'::interval, conditions.timec)); finalized | t --[ RECORD 2 ]---------------------+--------------------------------------------------------------------- -hypertable_schema | public -hypertable_name | conditions -view_schema | public -view_name | conditions_summary_old -view_owner | default_perm_user -materialized_only | t -compression_enabled | f -materialization_hypertable_schema | _timescaledb_internal -materialization_hypertable_name | _materialized_hypertable_62 -view_definition | SELECT time_bucket('@ 1 day'::interval, conditions.timec) AS timec,+ - | min(conditions.location) AS minl, + - | sum(conditions.temperature) AS sumt, + - | sum(conditions.humidity) AS sumh + - | FROM conditions + - | GROUP BY (time_bucket('@ 1 day'::interval, conditions.timec)); -finalized | f \x OFF CALL refresh_continuous_aggregate('conditions_summary_new', NULL, NULL); -CALL refresh_continuous_aggregate('conditions_summary_old', NULL, NULL); -- Check and compare number of returned rows -SELECT count(*) FROM conditions_summary_new -UNION 
-SELECT count(*) FROM conditions_summary_old; +SELECT count(*) FROM conditions_summary_new; count ------- 4 (1 row) --- Should return 4 rows that is the same number of rows above -SELECT * -FROM conditions_summary_new -NATURAL JOIN conditions_summary_old -ORDER BY timec; - timec | minl | sumt | sumh -------------------------------+------+------+------ - Thu Dec 31 16:00:00 2009 PST | SFO | 55 | 45 - Fri Jan 01 16:00:00 2010 PST | NYC | 230 | 190 - Wed Oct 31 17:00:00 2018 PDT | NYC | 45 | 35 - Thu Nov 01 17:00:00 2018 PDT | NYC | 35 | 15 -(4 rows) - -- Parallel planning test for realtime Continuous Aggregate DROP TABLE conditions CASCADE; -NOTICE: drop cascades to 4 other objects NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 2 other objects CREATE TABLE conditions ( @@ -2324,17 +2284,17 @@ EXPLAIN (COSTS OFF, TIMING OFF) SELECT * FROM conditions_daily WHERE time_bucket QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Merge Append - Sort Key: _materialized_hypertable_64.sum DESC + Sort Key: _materialized_hypertable_63.sum DESC -> Sort - Sort Key: _materialized_hypertable_64.sum DESC - -> Custom Scan (ChunkAppend) on _materialized_hypertable_64 + Sort Key: _materialized_hypertable_63.sum DESC + -> Custom Scan (ChunkAppend) on _materialized_hypertable_63 Chunks excluded during startup: 0 - -> Index Scan using _hyper_64_185_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_185_chunk - Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) - -> Index Scan using _hyper_64_189_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_189_chunk - Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) - -> Index Scan using _hyper_64_190_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_190_chunk - Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan using _hyper_63_183_chunk__materialized_hypertable_63_time_bucket_idx on _hyper_63_183_chunk + Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan using _hyper_63_187_chunk__materialized_hypertable_63_time_bucket_idx on _hyper_63_187_chunk + Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan using _hyper_63_188_chunk__materialized_hypertable_63_time_bucket_idx on _hyper_63_188_chunk + Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND 
(time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) -> Sort Sort Key: (sum(conditions.temperature)) DESC -> HashAggregate @@ -2342,8 +2302,8 @@ EXPLAIN (COSTS OFF, TIMING OFF) SELECT * FROM conditions_daily WHERE time_bucket -> Result -> Custom Scan (ChunkAppend) on conditions Chunks excluded during startup: 26 - -> Index Scan Backward using _hyper_63_184_chunk_conditions_timec_idx on _hyper_63_184_chunk - Index Cond: ((timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (timec >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan Backward using _hyper_62_182_chunk_conditions_timec_idx on _hyper_62_182_chunk + Index Cond: ((timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (timec >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) Filter: (time_bucket('@ 1 day'::interval, timec) >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone) (22 rows) diff --git a/tsl/test/expected/continuous_aggs-16.out b/tsl/test/expected/continuous_aggs-16.out index b5431d73158..642ba04adea 100644 --- a/tsl/test/expected/continuous_aggs-16.out +++ b/tsl/test/expected/continuous_aggs-16.out @@ -2214,16 +2214,10 @@ AS SELECT time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) FROM conditions GROUP BY time_bucket('1day', timec) WITH NO DATA; -CREATE MATERIALIZED VIEW conditions_summary_old(timec, minl, sumt, sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -AS -SELECT time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) -FROM conditions -GROUP BY time_bucket('1day', timec) WITH NO DATA; \x ON SELECT * FROM timescaledb_information.continuous_aggregates -WHERE view_name IN ('conditions_summary_new', 'conditions_summary_old'); +WHERE view_name = 'conditions_summary_new'; -[ RECORD 1 ]---------------------+---------------------------------------------------------- hypertable_schema | public hypertable_name | conditions @@ -2241,52 +2235,18 @@ view_definition | SELECT time_bucket('@ 1 day'::interval, tim | FROM conditions + | GROUP BY (time_bucket('@ 1 day'::interval, timec)); finalized | t --[ RECORD 2 ]---------------------+---------------------------------------------------------- -hypertable_schema | public -hypertable_name | conditions -view_schema | public -view_name | conditions_summary_old -view_owner | default_perm_user -materialized_only | t -compression_enabled | f -materialization_hypertable_schema | _timescaledb_internal -materialization_hypertable_name | _materialized_hypertable_62 -view_definition | SELECT time_bucket('@ 1 day'::interval, timec) AS timec,+ - | min(location) AS minl, + - | sum(temperature) AS sumt, + - | sum(humidity) AS sumh + - | FROM conditions + - | GROUP BY (time_bucket('@ 1 day'::interval, timec)); -finalized | f \x OFF CALL refresh_continuous_aggregate('conditions_summary_new', NULL, NULL); -CALL refresh_continuous_aggregate('conditions_summary_old', NULL, NULL); -- Check and compare number of returned rows -SELECT count(*) FROM conditions_summary_new -UNION -SELECT count(*) FROM conditions_summary_old; +SELECT count(*) FROM conditions_summary_new; count ------- 4 (1 row) --- Should return 4 rows that is the same number of rows above -SELECT * -FROM conditions_summary_new -NATURAL JOIN conditions_summary_old -ORDER BY timec; - timec | minl | sumt 
| sumh -------------------------------+------+------+------ - Thu Dec 31 16:00:00 2009 PST | SFO | 55 | 45 - Fri Jan 01 16:00:00 2010 PST | NYC | 230 | 190 - Wed Oct 31 17:00:00 2018 PDT | NYC | 45 | 35 - Thu Nov 01 17:00:00 2018 PDT | NYC | 35 | 15 -(4 rows) - -- Parallel planning test for realtime Continuous Aggregate DROP TABLE conditions CASCADE; -NOTICE: drop cascades to 4 other objects NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 2 other objects CREATE TABLE conditions ( @@ -2324,17 +2284,17 @@ EXPLAIN (COSTS OFF, TIMING OFF) SELECT * FROM conditions_daily WHERE time_bucket QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Merge Append - Sort Key: _materialized_hypertable_64.sum DESC + Sort Key: _materialized_hypertable_63.sum DESC -> Sort - Sort Key: _materialized_hypertable_64.sum DESC - -> Custom Scan (ChunkAppend) on _materialized_hypertable_64 + Sort Key: _materialized_hypertable_63.sum DESC + -> Custom Scan (ChunkAppend) on _materialized_hypertable_63 Chunks excluded during startup: 0 - -> Index Scan using _hyper_64_185_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_185_chunk - Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) - -> Index Scan using _hyper_64_189_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_189_chunk - Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) - -> Index Scan using _hyper_64_190_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_190_chunk - Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan using _hyper_63_183_chunk__materialized_hypertable_63_time_bucket_idx on _hyper_63_183_chunk + Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan using _hyper_63_187_chunk__materialized_hypertable_63_time_bucket_idx on _hyper_63_187_chunk + Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan using _hyper_63_188_chunk__materialized_hypertable_63_time_bucket_idx on _hyper_63_188_chunk + Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) -> Sort Sort Key: (sum(conditions.temperature)) DESC -> HashAggregate @@ -2342,8 +2302,8 @@ EXPLAIN (COSTS OFF, TIMING OFF) SELECT * FROM conditions_daily WHERE time_bucket -> Result -> Custom Scan (ChunkAppend) on 
conditions Chunks excluded during startup: 26 - -> Index Scan Backward using _hyper_63_184_chunk_conditions_timec_idx on _hyper_63_184_chunk - Index Cond: ((timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (timec >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan Backward using _hyper_62_182_chunk_conditions_timec_idx on _hyper_62_182_chunk + Index Cond: ((timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(63)), '-infinity'::timestamp with time zone)) AND (timec >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) Filter: (time_bucket('@ 1 day'::interval, timec) >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone) (22 rows) diff --git a/tsl/test/expected/telemetry_stats-13.out b/tsl/test/expected/telemetry_stats-13.out index 13d4ca32ce8..846be6b56db 100644 --- a/tsl/test/expected/telemetry_stats-13.out +++ b/tsl/test/expected/telemetry_stats-13.out @@ -70,16 +70,6 @@ FROM hyper GROUP BY hour, device; NOTICE: continuous aggregate "contagg" is already up-to-date -CREATE MATERIALIZED VIEW contagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - hyper -GROUP BY hour, device; -NOTICE: continuous aggregate "contagg_old" is already up-to-date -- Create another view (already have the "relations" view) CREATE VIEW devices AS SELECT DISTINCT ON (device) device @@ -153,12 +143,12 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; }, + "indexes_size": 0, + "num_children": 0, + - "num_relations": 2, + + "num_relations": 1, + "num_reltuples": 0, + "num_caggs_nested": 0, + "num_caggs_finalized": 1, + "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + + "num_caggs_using_real_time_aggregation": 1 + }, + "distributed_hypertables_data_node": { + "heap_size": 0, + @@ -216,7 +206,6 @@ SELECT * FROM normal; INSERT INTO part SELECT * FROM normal; CALL refresh_continuous_aggregate('contagg', NULL, NULL); -CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); -- ANALYZE to get updated reltuples stats ANALYZE normal, hyper, part; SELECT count(c) FROM show_chunks('hyper') c; @@ -231,12 +220,6 @@ SELECT count(c) FROM show_chunks('contagg') c; 2 (1 row) -SELECT count(c) FROM show_chunks('contagg_old') c; - count -------- - 2 -(1 row) - -- Update and show the telemetry report REFRESH MATERIALIZED VIEW telemetry_report; SELECT jsonb_pretty(rels) AS relations FROM relations; @@ -289,8 +272,8 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "num_reltuples": 697 + }, + "continuous_aggregates": { + - "heap_size": 188416, + - "toast_size": 16384, + + "heap_size": 90112, + + "toast_size": 0, + "compression": { + "compressed_heap_size": 0, + "compressed_row_count": 0, + @@ -304,14 +287,14 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "uncompressed_indexes_size": 0, + "compressed_row_count_frozen_immediately": 0+ }, + - "indexes_size": 229376, + - "num_children": 4, + - "num_relations": 2, + + "indexes_size": 114688, + + "num_children": 2, + + "num_relations": 1, + "num_reltuples": 0, + "num_caggs_nested": 0, + "num_caggs_finalized": 1, + "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + + "num_caggs_using_real_time_aggregation": 1 + }, + "distributed_hypertables_data_node": { + "heap_size": 0, + @@ -446,8 
+429,8 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "num_reltuples": 697 + }, + "continuous_aggregates": { + - "heap_size": 180224, + - "toast_size": 24576, + + "heap_size": 81920, + + "toast_size": 8192, + "compression": { + "compressed_heap_size": 40960, + "compressed_row_count": 10, + @@ -461,14 +444,14 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "uncompressed_indexes_size": 81920, + "compressed_row_count_frozen_immediately": 10+ }, + - "indexes_size": 180224, + - "num_children": 4, + - "num_relations": 2, + + "indexes_size": 65536, + + "num_children": 2, + + "num_relations": 1, + "num_reltuples": 0, + "num_caggs_nested": 0, + "num_caggs_finalized": 1, + "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 1 + + "num_caggs_using_real_time_aggregation": 0 + }, + "distributed_hypertables_data_node": { + "heap_size": 0, + @@ -560,24 +543,18 @@ select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, inte 1002 (1 row) -select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed - add_continuous_aggregate_policy ---------------------------------- - 1003 -(1 row) - -- add retention policy, fixed select add_retention_policy('hyper', interval '1 year', initial_start => now()); add_retention_policy ---------------------- - 1004 + 1003 (1 row) -- add compression policy select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); add_compression_policy ------------------------ - 1005 + 1004 (1 row) select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; @@ -589,7 +566,7 @@ select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_ac select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; contagg_fixed | contagg_drifting ---------------+------------------ - 1 | 1 + 0 | 1 (1 row) select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; @@ -711,7 +688,7 @@ CREATE TABLE conditions ( SELECT create_hypertable('conditions', 'time'); create_hypertable ------------------------- - (6,public,conditions,t) + (5,public,conditions,t) (1 row) CREATE MATERIALIZED VIEW conditions_summary_hourly_1 diff --git a/tsl/test/expected/telemetry_stats-14.out b/tsl/test/expected/telemetry_stats-14.out index dbebefc4231..83ffe4ec9a5 100644 --- a/tsl/test/expected/telemetry_stats-14.out +++ b/tsl/test/expected/telemetry_stats-14.out @@ -70,16 +70,6 @@ FROM hyper GROUP BY hour, device; NOTICE: continuous aggregate "contagg" is already up-to-date -CREATE MATERIALIZED VIEW contagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - hyper -GROUP BY hour, device; -NOTICE: continuous aggregate "contagg_old" is already up-to-date -- Create another view (already have the "relations" view) CREATE VIEW devices AS SELECT DISTINCT ON (device) device @@ -153,12 +143,12 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; }, + "indexes_size": 0, + "num_children": 0, + - "num_relations": 2, + + "num_relations": 1, + "num_reltuples": 0, + "num_caggs_nested": 0, + "num_caggs_finalized": 1, + "num_caggs_on_distributed_hypertables": 0, + - 
"num_caggs_using_real_time_aggregation": 2 + + "num_caggs_using_real_time_aggregation": 1 + }, + "distributed_hypertables_data_node": { + "heap_size": 0, + @@ -216,7 +206,6 @@ SELECT * FROM normal; INSERT INTO part SELECT * FROM normal; CALL refresh_continuous_aggregate('contagg', NULL, NULL); -CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); -- ANALYZE to get updated reltuples stats ANALYZE normal, hyper, part; SELECT count(c) FROM show_chunks('hyper') c; @@ -231,12 +220,6 @@ SELECT count(c) FROM show_chunks('contagg') c; 2 (1 row) -SELECT count(c) FROM show_chunks('contagg_old') c; - count -------- - 2 -(1 row) - -- Update and show the telemetry report REFRESH MATERIALIZED VIEW telemetry_report; SELECT jsonb_pretty(rels) AS relations FROM relations; @@ -289,8 +272,8 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "num_reltuples": 697 + }, + "continuous_aggregates": { + - "heap_size": 188416, + - "toast_size": 16384, + + "heap_size": 90112, + + "toast_size": 0, + "compression": { + "compressed_heap_size": 0, + "compressed_row_count": 0, + @@ -304,14 +287,14 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "uncompressed_indexes_size": 0, + "compressed_row_count_frozen_immediately": 0+ }, + - "indexes_size": 229376, + - "num_children": 4, + - "num_relations": 2, + + "indexes_size": 114688, + + "num_children": 2, + + "num_relations": 1, + "num_reltuples": 0, + "num_caggs_nested": 0, + "num_caggs_finalized": 1, + "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + + "num_caggs_using_real_time_aggregation": 1 + }, + "distributed_hypertables_data_node": { + "heap_size": 0, + @@ -446,8 +429,8 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "num_reltuples": 697 + }, + "continuous_aggregates": { + - "heap_size": 188416, + - "toast_size": 24576, + + "heap_size": 90112, + + "toast_size": 8192, + "compression": { + "compressed_heap_size": 49152, + "compressed_row_count": 10, + @@ -461,14 +444,14 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "uncompressed_indexes_size": 81920, + "compressed_row_count_frozen_immediately": 10+ }, + - "indexes_size": 180224, + - "num_children": 4, + - "num_relations": 2, + + "indexes_size": 65536, + + "num_children": 2, + + "num_relations": 1, + "num_reltuples": 0, + "num_caggs_nested": 0, + "num_caggs_finalized": 1, + "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 1 + + "num_caggs_using_real_time_aggregation": 0 + }, + "distributed_hypertables_data_node": { + "heap_size": 0, + @@ -560,24 +543,18 @@ select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, inte 1002 (1 row) -select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed - add_continuous_aggregate_policy ---------------------------------- - 1003 -(1 row) - -- add retention policy, fixed select add_retention_policy('hyper', interval '1 year', initial_start => now()); add_retention_policy ---------------------- - 1004 + 1003 (1 row) -- add compression policy select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); add_compression_policy ------------------------ - 1005 + 1004 (1 row) select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; @@ -589,7 +566,7 @@ select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_ac select r->'num_continuous_aggs_policies_fixed' 
as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; contagg_fixed | contagg_drifting ---------------+------------------ - 1 | 1 + 0 | 1 (1 row) select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; @@ -711,7 +688,7 @@ CREATE TABLE conditions ( SELECT create_hypertable('conditions', 'time'); create_hypertable ------------------------- - (6,public,conditions,t) + (5,public,conditions,t) (1 row) CREATE MATERIALIZED VIEW conditions_summary_hourly_1 diff --git a/tsl/test/expected/telemetry_stats-15.out b/tsl/test/expected/telemetry_stats-15.out index dbebefc4231..83ffe4ec9a5 100644 --- a/tsl/test/expected/telemetry_stats-15.out +++ b/tsl/test/expected/telemetry_stats-15.out @@ -70,16 +70,6 @@ FROM hyper GROUP BY hour, device; NOTICE: continuous aggregate "contagg" is already up-to-date -CREATE MATERIALIZED VIEW contagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - hyper -GROUP BY hour, device; -NOTICE: continuous aggregate "contagg_old" is already up-to-date -- Create another view (already have the "relations" view) CREATE VIEW devices AS SELECT DISTINCT ON (device) device @@ -153,12 +143,12 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; }, + "indexes_size": 0, + "num_children": 0, + - "num_relations": 2, + + "num_relations": 1, + "num_reltuples": 0, + "num_caggs_nested": 0, + "num_caggs_finalized": 1, + "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + + "num_caggs_using_real_time_aggregation": 1 + }, + "distributed_hypertables_data_node": { + "heap_size": 0, + @@ -216,7 +206,6 @@ SELECT * FROM normal; INSERT INTO part SELECT * FROM normal; CALL refresh_continuous_aggregate('contagg', NULL, NULL); -CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); -- ANALYZE to get updated reltuples stats ANALYZE normal, hyper, part; SELECT count(c) FROM show_chunks('hyper') c; @@ -231,12 +220,6 @@ SELECT count(c) FROM show_chunks('contagg') c; 2 (1 row) -SELECT count(c) FROM show_chunks('contagg_old') c; - count -------- - 2 -(1 row) - -- Update and show the telemetry report REFRESH MATERIALIZED VIEW telemetry_report; SELECT jsonb_pretty(rels) AS relations FROM relations; @@ -289,8 +272,8 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "num_reltuples": 697 + }, + "continuous_aggregates": { + - "heap_size": 188416, + - "toast_size": 16384, + + "heap_size": 90112, + + "toast_size": 0, + "compression": { + "compressed_heap_size": 0, + "compressed_row_count": 0, + @@ -304,14 +287,14 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "uncompressed_indexes_size": 0, + "compressed_row_count_frozen_immediately": 0+ }, + - "indexes_size": 229376, + - "num_children": 4, + - "num_relations": 2, + + "indexes_size": 114688, + + "num_children": 2, + + "num_relations": 1, + "num_reltuples": 0, + "num_caggs_nested": 0, + "num_caggs_finalized": 1, + "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + + "num_caggs_using_real_time_aggregation": 1 + }, + "distributed_hypertables_data_node": { + "heap_size": 0, + @@ -446,8 +429,8 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "num_reltuples": 697 + }, + "continuous_aggregates": { + - "heap_size": 188416, + - "toast_size": 24576, + + "heap_size": 90112, + + "toast_size": 8192, 
+ "compression": { + "compressed_heap_size": 49152, + "compressed_row_count": 10, + @@ -461,14 +444,14 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "uncompressed_indexes_size": 81920, + "compressed_row_count_frozen_immediately": 10+ }, + - "indexes_size": 180224, + - "num_children": 4, + - "num_relations": 2, + + "indexes_size": 65536, + + "num_children": 2, + + "num_relations": 1, + "num_reltuples": 0, + "num_caggs_nested": 0, + "num_caggs_finalized": 1, + "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 1 + + "num_caggs_using_real_time_aggregation": 0 + }, + "distributed_hypertables_data_node": { + "heap_size": 0, + @@ -560,24 +543,18 @@ select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, inte 1002 (1 row) -select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed - add_continuous_aggregate_policy ---------------------------------- - 1003 -(1 row) - -- add retention policy, fixed select add_retention_policy('hyper', interval '1 year', initial_start => now()); add_retention_policy ---------------------- - 1004 + 1003 (1 row) -- add compression policy select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); add_compression_policy ------------------------ - 1005 + 1004 (1 row) select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; @@ -589,7 +566,7 @@ select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_ac select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; contagg_fixed | contagg_drifting ---------------+------------------ - 1 | 1 + 0 | 1 (1 row) select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; @@ -711,7 +688,7 @@ CREATE TABLE conditions ( SELECT create_hypertable('conditions', 'time'); create_hypertable ------------------------- - (6,public,conditions,t) + (5,public,conditions,t) (1 row) CREATE MATERIALIZED VIEW conditions_summary_hourly_1 diff --git a/tsl/test/expected/telemetry_stats-16.out b/tsl/test/expected/telemetry_stats-16.out index dbebefc4231..83ffe4ec9a5 100644 --- a/tsl/test/expected/telemetry_stats-16.out +++ b/tsl/test/expected/telemetry_stats-16.out @@ -70,16 +70,6 @@ FROM hyper GROUP BY hour, device; NOTICE: continuous aggregate "contagg" is already up-to-date -CREATE MATERIALIZED VIEW contagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - hyper -GROUP BY hour, device; -NOTICE: continuous aggregate "contagg_old" is already up-to-date -- Create another view (already have the "relations" view) CREATE VIEW devices AS SELECT DISTINCT ON (device) device @@ -153,12 +143,12 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; }, + "indexes_size": 0, + "num_children": 0, + - "num_relations": 2, + + "num_relations": 1, + "num_reltuples": 0, + "num_caggs_nested": 0, + "num_caggs_finalized": 1, + "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + + "num_caggs_using_real_time_aggregation": 1 + }, + "distributed_hypertables_data_node": { + "heap_size": 0, + @@ -216,7 +206,6 @@ SELECT * FROM normal; INSERT INTO part SELECT * FROM normal; CALL 
refresh_continuous_aggregate('contagg', NULL, NULL); -CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); -- ANALYZE to get updated reltuples stats ANALYZE normal, hyper, part; SELECT count(c) FROM show_chunks('hyper') c; @@ -231,12 +220,6 @@ SELECT count(c) FROM show_chunks('contagg') c; 2 (1 row) -SELECT count(c) FROM show_chunks('contagg_old') c; - count -------- - 2 -(1 row) - -- Update and show the telemetry report REFRESH MATERIALIZED VIEW telemetry_report; SELECT jsonb_pretty(rels) AS relations FROM relations; @@ -289,8 +272,8 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "num_reltuples": 697 + }, + "continuous_aggregates": { + - "heap_size": 188416, + - "toast_size": 16384, + + "heap_size": 90112, + + "toast_size": 0, + "compression": { + "compressed_heap_size": 0, + "compressed_row_count": 0, + @@ -304,14 +287,14 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "uncompressed_indexes_size": 0, + "compressed_row_count_frozen_immediately": 0+ }, + - "indexes_size": 229376, + - "num_children": 4, + - "num_relations": 2, + + "indexes_size": 114688, + + "num_children": 2, + + "num_relations": 1, + "num_reltuples": 0, + "num_caggs_nested": 0, + "num_caggs_finalized": 1, + "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + + "num_caggs_using_real_time_aggregation": 1 + }, + "distributed_hypertables_data_node": { + "heap_size": 0, + @@ -446,8 +429,8 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "num_reltuples": 697 + }, + "continuous_aggregates": { + - "heap_size": 188416, + - "toast_size": 24576, + + "heap_size": 90112, + + "toast_size": 8192, + "compression": { + "compressed_heap_size": 49152, + "compressed_row_count": 10, + @@ -461,14 +444,14 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; "uncompressed_indexes_size": 81920, + "compressed_row_count_frozen_immediately": 10+ }, + - "indexes_size": 180224, + - "num_children": 4, + - "num_relations": 2, + + "indexes_size": 65536, + + "num_children": 2, + + "num_relations": 1, + "num_reltuples": 0, + "num_caggs_nested": 0, + "num_caggs_finalized": 1, + "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 1 + + "num_caggs_using_real_time_aggregation": 0 + }, + "distributed_hypertables_data_node": { + "heap_size": 0, + @@ -560,24 +543,18 @@ select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, inte 1002 (1 row) -select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed - add_continuous_aggregate_policy ---------------------------------- - 1003 -(1 row) - -- add retention policy, fixed select add_retention_policy('hyper', interval '1 year', initial_start => now()); add_retention_policy ---------------------- - 1004 + 1003 (1 row) -- add compression policy select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); add_compression_policy ------------------------ - 1005 + 1004 (1 row) select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; @@ -589,7 +566,7 @@ select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_ac select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; contagg_fixed | contagg_drifting ---------------+------------------ - 1 | 1 + 0 | 1 (1 row) select r->'num_compression_policies_fixed' as 
compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; @@ -711,7 +688,7 @@ CREATE TABLE conditions ( SELECT create_hypertable('conditions', 'time'); create_hypertable ------------------------- - (6,public,conditions,t) + (5,public,conditions,t) (1 row) CREATE MATERIALIZED VIEW conditions_summary_hourly_1 diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index 393aabe6cac..e86278f9692 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -139,13 +139,11 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) TEST_TEMPLATES cagg_bgw.sql.in cagg_ddl.sql.in - cagg_errors_deprecated.sql.in cagg_query.sql.in cagg_repair.sql.in cagg_usage.sql.in compression_errors.sql.in - continuous_aggs.sql.in - continuous_aggs_deprecated.sql.in) + continuous_aggs.sql.in) if(USE_TELEMETRY) list(APPEND TEST_TEMPLATES telemetry_stats.sql.in) endif() diff --git a/tsl/test/sql/cagg_errors_deprecated.sql.in b/tsl/test/sql/cagg_errors_deprecated.sql.in deleted file mode 100644 index 937df662cd7..00000000000 --- a/tsl/test/sql/cagg_errors_deprecated.sql.in +++ /dev/null @@ -1,597 +0,0 @@ --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. - -\set ON_ERROR_STOP 0 -\set VERBOSITY default - ---negative tests for query validation -create table mat_t1( a integer, b integer,c TEXT); - -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature integer NULL, - humidity DOUBLE PRECISION NULL, - timemeasure TIMESTAMPTZ, - timeinterval INTERVAL - ); -select table_name from create_hypertable( 'conditions', 'timec'); - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, timescaledb.myfill = 1) -as -select location , min(temperature) -from conditions -group by time_bucket('1d', timec), location WITH NO DATA; - ---valid PG option -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, check_option = LOCAL ) -as -select * from conditions , mat_t1 WITH NO DATA; - ---non-hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select a, count(*) from mat_t1 -group by a WITH NO DATA; - --- no group by -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select count(*) from conditions WITH NO DATA; - --- no time_bucket in group by -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select count(*) from conditions group by location WITH NO DATA; - --- with valid query in a CTE -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -with m1 as ( -Select location, count(*) from conditions - group by time_bucket('1week', timec) , location) -select * from m1 WITH NO DATA; - ---with DISTINCT ON -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as - select distinct on ( location ) count(*) from conditions group by location, time_bucket('1week', timec) WITH NO DATA; - ---aggregate with DISTINCT -CREATE MATERIALIZED VIEW mat_m1 WITH 
(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), - count(location) , sum(distinct temperature) from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; - ---aggregate with FILTER -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), - sum(temperature) filter ( where humidity > 20 ) from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; - --- aggregate with filter in having clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select time_bucket('1week', timec), max(temperature) -from conditions - group by time_bucket('1week', timec) , location - having sum(temperature) filter ( where humidity > 20 ) > 50 WITH NO DATA; - --- time_bucket on non partitioning column of hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timemeasure) , location WITH NO DATA; - ---time_bucket on expression -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timec+ '10 minutes'::interval) , location WITH NO DATA; - ---multiple time_bucket functions -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket('1week', timec) , time_bucket('1month', timec), location WITH NO DATA; - ---time_bucket using additional args -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket( INTERVAL '5 minutes', timec, INTERVAL '-2.5 minutes') , location WITH NO DATA; - ---time_bucket using non-const for first argument -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select max(temperature) -from conditions - group by time_bucket( timeinterval, timec) , location WITH NO DATA; - --- ordered set aggr -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select mode() within group( order by humidity) -from conditions - group by time_bucket('1week', timec) WITH NO DATA; - ---window function -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select avg(temperature) over( order by humidity) -from conditions - WITH NO DATA; - ---aggregate without combine function -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select json_agg(location) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -; - --- Starting on PG16 this test will pass because array_agg is parallel safe --- https://github.com/postgres/postgres/commit/16fd03e956540d1b47b743f6a84f37c54ac93dd4 -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, 
timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature), array_agg(location) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -; - -- user-defined aggregate without combine function -CREATE AGGREGATE newavg ( - sfunc = int4_avg_accum, basetype = int4, stype = _int8, - finalfunc = int8_avg, - initcond1 = '{0,0}' -); - -DROP MATERIALIZED VIEW IF EXISTS mat_m1; -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), newavg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location WITH NO DATA; -; - -- using subqueries -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from -( select humidity, temperature, location, timec -from conditions ) q - group by time_bucket('1week', timec) , location WITH NO DATA; - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -select * from -( Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location ) q WITH NO DATA; - ---using limit / limit offset -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -limit 10 WITH NO DATA; - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -offset 10 WITH NO DATA; - ---using ORDER BY in view definition -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -ORDER BY 1 WITH NO DATA; - ---using FETCH -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -fetch first 10 rows only WITH NO DATA; - ---using locking clauses FOR clause ---all should be disabled. 
we cannot guarantee locks on the hypertable -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR KEY SHARE WITH NO DATA; - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR SHARE WITH NO DATA; - - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR UPDATE WITH NO DATA; - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by time_bucket('1week', timec) , location -FOR NO KEY UPDATE WITH NO DATA; - ---tablesample clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions tablesample bernoulli(0.2) - group by time_bucket('1week', timec) , location - WITH NO DATA; - -- ONLY in from clause -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from ONLY conditions - group by time_bucket('1week', timec) , location WITH NO DATA; - ---grouping sets and variants -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions - group by grouping sets(time_bucket('1week', timec) , location ) WITH NO DATA; - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), avg(temperature::int4) -from conditions -group by rollup(time_bucket('1week', timec) , location ) WITH NO DATA; - ---NO immutable functions -- check all clauses -CREATE FUNCTION test_stablefunc(int) RETURNS int LANGUAGE 'sql' - STABLE AS 'SELECT $1 + 10'; - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), max(timec + INTERVAL '1h') -from conditions -group by time_bucket('1week', timec) , location WITH NO DATA; - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum(humidity), min(location) -from conditions -group by time_bucket('1week', timec) -having max(timec + INTERVAL '1h') > '2010-01-01 09:00:00-08' WITH NO DATA; - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( test_stablefunc(humidity::int) ), min(location) -from conditions -group by time_bucket('1week', timec) WITH NO DATA; - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( temperature ), min(location) -from conditions -group by 
time_bucket('1week', timec), test_stablefunc(humidity::int) WITH NO DATA; - --- Should use CREATE MATERIALIZED VIEW to create continuous aggregates -CREATE VIEW continuous_aggs_errors_tbl1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS -SELECT time_bucket('1 week', timec) - FROM conditions -GROUP BY time_bucket('1 week', timec); - --- row security on table -create table rowsec_tab( a bigint, b integer, c integer); -select table_name from create_hypertable( 'rowsec_tab', 'a', chunk_time_interval=>10); -CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a), 0)::bigint FROM rowsec_tab $$; -SELECT set_integer_now_func('rowsec_tab', 'integer_now_test'); -alter table rowsec_tab ENABLE ROW LEVEL SECURITY; -create policy rowsec_tab_allview ON rowsec_tab FOR SELECT USING(true); - -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -Select sum( b), min(c) -from rowsec_tab -group by time_bucket('1', a) WITH NO DATA; - --- cagg on cagg not allowed -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS -SELECT time_bucket('1 day', timec) AS bucket - FROM conditions -GROUP BY time_bucket('1 day', timec); - -CREATE MATERIALIZED VIEW mat_m2_on_mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) -AS -SELECT time_bucket('1 week', bucket) AS bucket - FROM mat_m1 -GROUP BY time_bucket('1 week', bucket); - -drop table conditions cascade; - ---negative tests for WITH options -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); - -select table_name from create_hypertable( 'conditions', 'timec'); - -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket('1day', timec) WITH NO DATA; - -SELECT h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_with_test' -\gset - -\set ON_ERROR_STOP 0 -ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'false'); -ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'true'); -ALTER MATERIALIZED VIEW mat_with_test ALTER timec DROP default; -\set ON_ERROR_STOP 1 -\set VERBOSITY terse - -DROP TABLE conditions CASCADE; - ---test WITH using a hypertable with an integer time dimension -CREATE TABLE conditions ( - timec SMALLINT NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); - -select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); -CREATE OR REPLACE FUNCTION integer_now_test_s() returns smallint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::smallint FROM conditions 
$$; -SELECT set_integer_now_func('conditions', 'integer_now_test_s'); - -\set ON_ERROR_STOP 0 -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; - -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; - -ALTER TABLE conditions ALTER timec type int; - -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; - -\set ON_ERROR_STOP 1 -DROP TABLE conditions cascade; - -CREATE TABLE conditions ( - timec BIGINT NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); - -select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); -CREATE OR REPLACE FUNCTION integer_now_test_b() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::bigint FROM conditions $$; -SELECT set_integer_now_func('conditions', 'integer_now_test_b'); - -create materialized view mat_with_test( timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -as -select time_bucket(BIGINT '100', timec), min(location), sum(temperature),sum(humidity) -from conditions -group by 1 WITH NO DATA; - --- custom time partition functions are not supported with invalidations -CREATE FUNCTION text_part_func(TEXT) RETURNS BIGINT - AS $$ SELECT length($1)::BIGINT $$ - LANGUAGE SQL IMMUTABLE; - -CREATE TABLE text_time(time TEXT); - SELECT create_hypertable('text_time', 'time', chunk_time_interval => 10, time_partitioning_func => 'text_part_func'); - -\set VERBOSITY default -\set ON_ERROR_STOP 0 -CREATE MATERIALIZED VIEW text_view - WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) - AS SELECT time_bucket('5', text_part_func(time)), COUNT(time) - FROM text_time - GROUP BY 1 WITH NO DATA; -\set ON_ERROR_STOP 1 - --- Check that we get an error when mixing normal materialized views --- and continuous aggregates. -CREATE MATERIALIZED VIEW normal_mat_view AS -SELECT time_bucket('5', text_part_func(time)), COUNT(time) - FROM text_time -GROUP BY 1 WITH NO DATA; -\set VERBOSITY terse - -\set ON_ERROR_STOP 0 -DROP MATERIALIZED VIEW normal_mat_view, mat_with_test; -\set ON_ERROR_STOP 1 - -DROP TABLE text_time CASCADE; - -CREATE TABLE measurements (time TIMESTAMPTZ NOT NULL, device INT, value FLOAT); -SELECT create_hypertable('measurements', 'time'); - -INSERT INTO measurements VALUES ('2019-03-04 13:30', 1, 1.3); - --- Add a continuous aggregate on the measurements table and a policy --- to be able to test error cases for the add_job function. 
-CREATE MATERIALIZED VIEW measurements_summary WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS -SELECT time_bucket('1 day', time), COUNT(time) - FROM measurements -GROUP BY 1 WITH NO DATA; - -SELECT ca.mat_hypertable_id AS "MAT_HYPERTABLE_ID" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'measurements_summary' -\gset - --- First test that add_job checks the config. It is currently possible --- to add non-custom jobs using the add_job function so we need to --- test that the function actually checks the config parameters. These --- should all generate errors, for different reasons... -\set ON_ERROR_STOP 0 --- ... this one because it is missing a field. -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => '{"end_offset": null, "start_offset": null}'); --- ... this one because it has a bad value for start_offset -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => ('{"end_offset": null, "start_offset": "1 fortnight", "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); --- ... this one because it has a bad value for end_offset -SELECT add_job( - '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, - '1 hour'::interval, - check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => ('{"end_offset": "chicken", "start_offset": null, "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); -\set ON_ERROR_STOP 1 - -SELECT add_continuous_aggregate_policy('measurements_summary', NULL, NULL, '1 h'::interval) AS job_id -\gset - -\x on -SELECT * FROM _timescaledb_config.bgw_job WHERE id = :job_id; -\x off - --- These are all weird values for the parameters for the continuous --- aggregate jobs and should generate an error. Since the config will --- be replaced, we will also generate error for missing arguments. 
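-- Editorial aside, not part of the original test: for contrast with the
-- failing calls above and below, a well-formed refresh-policy config needs a
-- valid mat_hypertable_id plus parseable interval offsets. A minimal sketch
-- reusing the values captured above (the offset values are illustrative only):
SELECT alter_job(:job_id,
    config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "start_offset": "1 week", "end_offset": "1 hour"}')::jsonb);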
-\set ON_ERROR_STOP 0 -SELECT alter_job(:job_id, config => '{"end_offset": "1 week", "start_offset": "2 fortnights"}'); -SELECT alter_job(:job_id, - config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 fortnights"}')::jsonb); -SELECT alter_job(:job_id, - config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 week"}')::jsonb); -\set ON_ERROR_STOP 1 - -DROP TABLE measurements CASCADE; -DROP TABLE conditions CASCADE; - --- test handling of invalid mat_hypertable_id -create table i2980(time timestamptz not null); -select create_hypertable('i2980','time'); -create materialized view i2980_cagg with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; -select add_continuous_aggregate_policy('i2980_cagg',NULL,NULL,'4h') AS job_id \gset -\set ON_ERROR_STOP 0 -select alter_job(:job_id,config:='{"end_offset": null, "start_offset": null, "mat_hypertable_id": 1000}'); - ---test creating continuous aggregate with compression enabled -- -CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.compress, timescaledb.finalized = false) -AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; - ---this one succeeds -CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) -AS SELECT time_bucket('1h',time) as bucket, avg(7) FROM i2980 GROUP BY 1; - ---now enable compression with invalid parameters -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, -timescaledb.compress_segmentby = 'bucket'); - -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, -timescaledb.compress_orderby = 'bucket'); - ---enable compression and test re-enabling compression -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress); -insert into i2980 select now(); -call refresh_continuous_aggregate('i2980_cagg2', NULL, NULL); -SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'false'); -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'true'); -ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, timescaledb.compress_segmentby = 'bucket'); - ---Errors with compression policy on caggs-- -select add_continuous_aggregate_policy('i2980_cagg2', interval '10 day', interval '2 day' ,'4h') AS job_id ; -SELECT add_compression_policy('i2980_cagg', '8 day'::interval); -ALTER MATERIALIZED VIEW i2980_cagg SET ( timescaledb.compress ); -SELECT add_compression_policy('i2980_cagg', '8 day'::interval); - -SELECT add_continuous_aggregate_policy('i2980_cagg2', '10 day'::interval, '6 day'::interval); -SELECT add_compression_policy('i2980_cagg2', '3 day'::interval); -SELECT add_compression_policy('i2980_cagg2', '1 day'::interval); -SELECT add_compression_policy('i2980_cagg2', '3'::integer); -SELECT add_compression_policy('i2980_cagg2', 13::integer); - -SELECT materialization_hypertable_schema || '.' 
|| materialization_hypertable_name AS "MAT_TABLE_NAME" -FROM timescaledb_information.continuous_aggregates -WHERE view_name = 'i2980_cagg2' -\gset -SELECT add_compression_policy( :'MAT_TABLE_NAME', 13::integer); - ---TEST compressing cagg chunks without enabling compression -SELECT count(*) FROM (select decompress_chunk(ch) FROM show_chunks('i2980_cagg2') ch ) q; -ALTER MATERIALIZED VIEW i2980_cagg2 SET (timescaledb.compress = 'false'); -SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; - --- test error handling when trying to create cagg on internal hypertable -CREATE TABLE comp_ht_test(time timestamptz NOT NULL); -SELECT table_name FROM create_hypertable('comp_ht_test','time'); -ALTER TABLE comp_ht_test SET (timescaledb.compress); - -SELECT - format('%I.%I', ht.schema_name, ht.table_name) AS "INTERNALTABLE" -FROM - _timescaledb_catalog.hypertable ht - INNER JOIN _timescaledb_catalog.hypertable uncompress ON (ht.id = uncompress.compressed_hypertable_id - AND uncompress.table_name = 'comp_ht_test') \gset - -CREATE MATERIALIZED VIEW cagg1 WITH(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',_ts_meta_min_1) FROM :INTERNALTABLE GROUP BY 1; diff --git a/tsl/test/sql/cagg_joins.sql b/tsl/test/sql/cagg_joins.sql index 6c40c09b4b6..c34a7f95cdb 100644 --- a/tsl/test/sql/cagg_joins.sql +++ b/tsl/test/sql/cagg_joins.sql @@ -380,40 +380,8 @@ AND conditions.city = devices.location AND conditions.temperature > 28 GROUP BY name, bucket; ---With old format cagg definition -CREATE MATERIALIZED VIEW cagg_cagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE, timescaledb.finalized = FALSE) AS -SELECT time_bucket(INTERVAL '1 day', day) AS bucket, - AVG(temperature), - MAX(temperature), - MIN(temperature), - devices.device_id device_id, - name -FROM conditions, devices -WHERE conditions.device_id = devices.device_id -GROUP BY name, bucket, devices.device_id; - -CREATE MATERIALIZED VIEW cagg_cagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE, timescaledb.finalized = FALSE) AS -SELECT time_bucket(INTERVAL '1 day', day) AS bucket, - AVG(temperature), - MAX(temperature), - MIN(temperature), - devices.device_id device_id, - name -FROM conditions JOIN devices -ON conditions.device_id = devices.device_id -GROUP BY name, bucket, devices.device_id; - CREATE TABLE mat_t1( a integer, b integer,c TEXT); ---With LATERAL multiple tables old format -CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only = FALSE, timescaledb.finalized = false) -as -select temperature, count(*) from conditions, -LATERAL (Select * from mat_t1 where a = conditions.temperature) q -group by temperature WITH NO DATA; - --With LATERAL multiple tables in new format CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only = FALSE) as diff --git a/tsl/test/sql/cagg_migrate.sql b/tsl/test/sql/cagg_migrate.sql index f94e7bd05b3..49acee5b5e4 100644 --- a/tsl/test/sql/cagg_migrate.sql +++ b/tsl/test/sql/cagg_migrate.sql @@ -2,12 +2,12 @@ -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. 
-\set IS_DISTRIBUTED FALSE -\set IS_TIME_DIMENSION FALSE +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -- ######################################################## -- ## INTEGER data type tests -- ######################################################## +\set IS_TIME_DIMENSION FALSE \set TIME_DIMENSION_DATATYPE INTEGER \ir include/cagg_migrate_common.sql diff --git a/tsl/test/sql/cagg_repair.sql.in b/tsl/test/sql/cagg_repair.sql.in index 83172bdad96..075f1aa3e37 100644 --- a/tsl/test/sql/cagg_repair.sql.in +++ b/tsl/test/sql/cagg_repair.sql.in @@ -109,22 +109,4 @@ WITH NO DATA; CALL _timescaledb_internal.cagg_try_repair('conditions_summary_nojoin', TRUE); \d+ conditions_summary_nojoin --- Tests with old cagg format -CREATE MATERIALIZED VIEW conditions_summary_old_format -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket(INTERVAL '1 week', "time") AS bucket, - MIN(temperature), - MAX(temperature), - SUM(temperature) -FROM - conditions -GROUP BY - 1 -WITH NO DATA; - --- Should rebuild without forcing -CALL _timescaledb_internal.cagg_try_repair('conditions_summary_old_format', FALSE); -\d+ conditions_summary_old_format - DROP PROCEDURE _timescaledb_internal.cagg_try_repair (REGCLASS, BOOLEAN); diff --git a/tsl/test/sql/continuous_aggs.sql.in b/tsl/test/sql/continuous_aggs.sql.in index 1434aac364d..91bf94fc092 100644 --- a/tsl/test/sql/continuous_aggs.sql.in +++ b/tsl/test/sql/continuous_aggs.sql.in @@ -1490,32 +1490,16 @@ SELECT time_bucket('1day', timec), min(location), sum(temperature), sum(humidity FROM conditions GROUP BY time_bucket('1day', timec) WITH NO DATA; -CREATE MATERIALIZED VIEW conditions_summary_old(timec, minl, sumt, sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -AS -SELECT time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) -FROM conditions -GROUP BY time_bucket('1day', timec) WITH NO DATA; - \x ON SELECT * FROM timescaledb_information.continuous_aggregates -WHERE view_name IN ('conditions_summary_new', 'conditions_summary_old'); +WHERE view_name = 'conditions_summary_new'; \x OFF CALL refresh_continuous_aggregate('conditions_summary_new', NULL, NULL); -CALL refresh_continuous_aggregate('conditions_summary_old', NULL, NULL); -- Check and compare number of returned rows -SELECT count(*) FROM conditions_summary_new -UNION -SELECT count(*) FROM conditions_summary_old; - --- Should return 4 rows that is the same number of rows above -SELECT * -FROM conditions_summary_new -NATURAL JOIN conditions_summary_old -ORDER BY timec; +SELECT count(*) FROM conditions_summary_new; -- Parallel planning test for realtime Continuous Aggregate DROP TABLE conditions CASCADE; diff --git a/tsl/test/sql/continuous_aggs_deprecated.sql.in b/tsl/test/sql/continuous_aggs_deprecated.sql.in deleted file mode 100644 index 881f94a924e..00000000000 --- a/tsl/test/sql/continuous_aggs_deprecated.sql.in +++ /dev/null @@ -1,1358 +0,0 @@ --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. 
- --- initialize the bgw mock state to prevent the materialization workers from running -\c :TEST_DBNAME :ROLE_SUPERUSER - -CREATE OR REPLACE FUNCTION ts_bgw_params_create() RETURNS VOID -AS :MODULE_PATHNAME LANGUAGE C VOLATILE; - -CREATE OR REPLACE FUNCTION test.continuous_aggs_find_view(cagg REGCLASS) RETURNS VOID -AS :TSL_MODULE_PATHNAME, 'ts_test_continuous_agg_find_by_view_name' LANGUAGE C; - -\set WAIT_ON_JOB 0 -\set IMMEDIATELY_SET_UNTIL 1 -\set WAIT_FOR_OTHER_TO_ADVANCE 2 - --- remove any default jobs, e.g., telemetry so bgw_job isn't polluted -DELETE FROM _timescaledb_config.bgw_job; - -SET ROLE :ROLE_DEFAULT_PERM_USER; - -SELECT * FROM _timescaledb_config.bgw_job; - ---TEST1 --- ---basic test with count -create table foo (a integer, b integer, c integer); -select table_name from create_hypertable('foo', 'a', chunk_time_interval=> 10); - -insert into foo values( 3 , 16 , 20); -insert into foo values( 1 , 10 , 20); -insert into foo values( 1 , 11 , 20); -insert into foo values( 1 , 12 , 20); -insert into foo values( 1 , 13 , 20); -insert into foo values( 1 , 14 , 20); -insert into foo values( 2 , 14 , 20); -insert into foo values( 2 , 15 , 20); -insert into foo values( 2 , 16 , 20); - -CREATE OR REPLACE FUNCTION integer_now_foo() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a), 0) FROM foo $$; -SELECT set_integer_now_func('foo', 'integer_now_foo'); - - -CREATE MATERIALIZED VIEW mat_m1(a, countb) -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -as -select a, count(b) -from foo -group by time_bucket(1, a), a WITH NO DATA; - -SELECT add_continuous_aggregate_policy('mat_m1', NULL, 2::integer, '12 h'::interval) AS job_id -\gset -SELECT * FROM _timescaledb_config.bgw_job; - -SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", - h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_m1' -\gset - -insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" -select a, _timescaledb_functions.partialize_agg(count(b)), -time_bucket(1, a) -,1 -from foo -group by time_bucket(1, a) , a ; - -select * from mat_m1 order by a ; - ---check triggers on user hypertable -- -SET ROLE :ROLE_SUPERUSER; -select tgname, tgtype, tgenabled , relname from pg_trigger, pg_class -where tgrelid = pg_class.oid and pg_class.relname like 'foo' -order by tgname; - -SET ROLE :ROLE_DEFAULT_PERM_USER; - --- TEST2 --- -DROP MATERIALIZED VIEW mat_m1; - -SHOW enable_partitionwise_aggregate; -SET enable_partitionwise_aggregate = on; - -SELECT * FROM _timescaledb_config.bgw_job; - -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL - ); - -select table_name from create_hypertable( 'conditions', 'timec'); - -insert into conditions values ( '2010-01-01 09:00:00-08', 'SFO', 55, 45); -insert into conditions values ( '2010-01-02 09:00:00-08', 'por', 100, 100); -insert into conditions values ( '2010-01-02 09:00:00-08', 'SFO', 65, 45); -insert into conditions values ( '2010-01-02 09:00:00-08', 'NYC', 65, 45); -insert into conditions values ( '2018-11-01 09:00:00-08', 'NYC', 45, 35); -insert into conditions values ( '2018-11-02 09:00:00-08', 'NYC', 35, 15); - - -CREATE MATERIALIZED VIEW mat_m1( timec, minl, sumt , sumh) -WITH 
(timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -as -select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket('1day', timec) WITH NO DATA; - -SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", - h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_m1' -\gset - --- Materialized hypertable for mat_m1 should not be visible in the --- hypertables view: -SELECT hypertable_schema, hypertable_name -FROM timescaledb_information.hypertables ORDER BY 1,2; - -SET ROLE :ROLE_SUPERUSER; -insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" -select - time_bucket('1day', timec), _timescaledb_functions.partialize_agg( min(location)), _timescaledb_functions.partialize_agg( sum(temperature)) , _timescaledb_functions.partialize_agg( sum(humidity)) -,1 -from conditions -group by time_bucket('1day', timec) ; - -SET ROLE :ROLE_DEFAULT_PERM_USER; ---should have same results -- -select timec, minl, sumt, sumh -from mat_m1 -order by timec; - -select time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) -from conditions -group by time_bucket('1day', timec) -order by 1; - -SET enable_partitionwise_aggregate = off; - --- TEST3 -- --- drop on table conditions should cascade to materialized mat_v1 - -drop table conditions cascade; - -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL - ); - -select table_name from create_hypertable( 'conditions', 'timec'); - -insert into conditions values ( '2010-01-01 09:00:00-08', 'SFO', 55, 45); -insert into conditions values ( '2010-01-02 09:00:00-08', 'por', 100, 100); -insert into conditions values ( '2010-01-02 09:00:00-08', 'NYC', 65, 45); -insert into conditions values ( '2010-01-02 09:00:00-08', 'SFO', 65, 45); -insert into conditions values ( '2010-01-03 09:00:00-08', 'NYC', 45, 55); -insert into conditions values ( '2010-01-05 09:00:00-08', 'SFO', 75, 100); -insert into conditions values ( '2018-11-01 09:00:00-08', 'NYC', 45, 35); -insert into conditions values ( '2018-11-02 09:00:00-08', 'NYC', 35, 15); -insert into conditions values ( '2018-11-03 09:00:00-08', 'NYC', 35, 25); - - -CREATE MATERIALIZED VIEW mat_m1( timec, minl, sumth, stddevh) -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -as -select time_bucket('1week', timec) , -min(location), sum(temperature)+sum(humidity), stddev(humidity) -from conditions -group by time_bucket('1week', timec) WITH NO DATA; - -SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", - h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_m1' -\gset - -SET ROLE :ROLE_SUPERUSER; -insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" -select - time_bucket('1week', timec), _timescaledb_functions.partialize_agg( min(location)), _timescaledb_functions.partialize_agg( sum(temperature)) , _timescaledb_functions.partialize_agg( sum(humidity)), 
_timescaledb_functions.partialize_agg(stddev(humidity)) -,1 -from conditions -group by time_bucket('1week', timec) ; -SET ROLE :ROLE_DEFAULT_PERM_USER; - ---should have same results -- -select timec, minl, sumth, stddevh -from mat_m1 -order by timec; - -select time_bucket('1week', timec) , -min(location), sum(temperature)+ sum(humidity), stddev(humidity) -from conditions -group by time_bucket('1week', timec) -order by time_bucket('1week', timec); - --- TEST4 -- ---materialized view with group by clause + expression in SELECT --- use previous data from conditions ---drop only the view. - --- apply where clause on result of mat_m1 -- -DROP MATERIALIZED VIEW mat_m1; -CREATE MATERIALIZED VIEW mat_m1( timec, minl, sumth, stddevh) -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -as -select time_bucket('1week', timec) , -min(location), sum(temperature)+sum(humidity), stddev(humidity) -from conditions -where location = 'NYC' -group by time_bucket('1week', timec) - WITH NO DATA; - -SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", - h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_m1' -\gset - -SET ROLE :ROLE_SUPERUSER; -insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" -select - time_bucket('1week', timec), _timescaledb_functions.partialize_agg( min(location)), _timescaledb_functions.partialize_agg( sum(temperature)) , _timescaledb_functions.partialize_agg( sum(humidity)), _timescaledb_functions.partialize_agg(stddev(humidity)) -,1 -from conditions -where location = 'NYC' -group by time_bucket('1week', timec) ; -SET ROLE :ROLE_DEFAULT_PERM_USER; - ---should have same results -- -select timec, minl, sumth, stddevh -from mat_m1 -where stddevh is not null -order by timec; - -select time_bucket('1week', timec) , -min(location), sum(temperature)+ sum(humidity), stddev(humidity) -from conditions -where location = 'NYC' -group by time_bucket('1week', timec) -order by time_bucket('1week', timec); - --- TEST5 -- ----------test with having clause ---------------------- -DROP MATERIALIZED VIEW mat_m1; -create materialized view mat_m1( timec, minl, sumth, stddevh) -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -as -select time_bucket('1week', timec) , -min(location), sum(temperature)+sum(humidity), stddev(humidity) -from conditions -group by time_bucket('1week', timec) -having stddev(humidity) is not null WITH NO DATA; -; - -SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", - h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_m1' -\gset - -SET ROLE :ROLE_SUPERUSER; -insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" -select - time_bucket('1week', timec), _timescaledb_functions.partialize_agg( min(location)), _timescaledb_functions.partialize_agg( sum(temperature)) , _timescaledb_functions.partialize_agg( sum(humidity)), _timescaledb_functions.partialize_agg(stddev(humidity)) -,1 -from conditions -group by time_bucket('1week', timec) ; -SET ROLE :ROLE_DEFAULT_PERM_USER; - --- should have same results -- 
-select * from mat_m1 -order by sumth; - -select time_bucket('1week', timec) , -min(location), sum(temperature)+sum(humidity), stddev(humidity) -from conditions -group by time_bucket('1week', timec) -having stddev(humidity) is not null -order by sum(temperature)+sum(humidity); - --- TEST6 -- ---group by with more than 1 group column --- having clause with a mix of columns from select list + others - -drop table conditions cascade; - -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp numeric NULL, - highp numeric null - ); - -select table_name from create_hypertable( 'conditions', 'timec'); - -insert into conditions -select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 55, 75, 40, 70; -insert into conditions -select generate_series('2018-11-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'NYC', 35, 45, 50, 40; -insert into conditions -select generate_series('2018-11-01 00:00'::timestamp, '2018-12-15 00:00'::timestamp, '1 day'), 'LA', 73, 55, 71, 28; - ---naming with AS clauses -CREATE MATERIALIZED VIEW mat_naming -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -as -select time_bucket('1week', timec) as bucket, location as loc, sum(temperature)+sum(humidity), stddev(humidity) -from conditions -group by bucket, loc -having min(location) >= 'NYC' and avg(temperature) > 20 WITH NO DATA; - -SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", - h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_naming' -\gset - -select attnum , attname from pg_attribute -where attnum > 0 and attrelid = -(Select oid from pg_class where relname like :'MAT_TABLE_NAME') -order by attnum, attname; - -DROP MATERIALIZED VIEW mat_naming; - ---naming with default names -CREATE MATERIALIZED VIEW mat_naming -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -as -select time_bucket('1week', timec), location, sum(temperature)+sum(humidity), stddev(humidity) -from conditions -group by 1,2 -having min(location) >= 'NYC' and avg(temperature) > 20 WITH NO DATA; - -SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", - h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_naming' -\gset - -select attnum , attname from pg_attribute -where attnum > 0 and attrelid = -(Select oid from pg_class where relname like :'MAT_TABLE_NAME') -order by attnum, attname; - -DROP MATERIALIZED VIEW mat_naming; - ---naming with view col names -CREATE MATERIALIZED VIEW mat_naming(bucket, loc, sum_t_h, stdd) -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -as -select time_bucket('1week', timec), location, sum(temperature)+sum(humidity), stddev(humidity) -from conditions -group by 1,2 -having min(location) >= 'NYC' and avg(temperature) > 20 WITH NO DATA; - -SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", - h.schema_name AS 
"MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_naming' -\gset - -select attnum , attname from pg_attribute -where attnum > 0 and attrelid = -(Select oid from pg_class where relname like :'MAT_TABLE_NAME') -order by attnum, attname; - -DROP MATERIALIZED VIEW mat_naming; - -CREATE MATERIALIZED VIEW mat_m1(timec, minl, sumth, stddevh) -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -as -select time_bucket('1week', timec) , -min(location), sum(temperature)+sum(humidity), stddev(humidity) -from conditions -group by time_bucket('1week', timec) -having min(location) >= 'NYC' and avg(temperature) > 20 WITH NO DATA; - -SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", - h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_m1' -\gset - -select attnum , attname from pg_attribute -where attnum > 0 and attrelid = -(Select oid from pg_class where relname like :'MAT_TABLE_NAME') -order by attnum, attname; - -SET ROLE :ROLE_SUPERUSER; -insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" -select - time_bucket('1week', timec), _timescaledb_functions.partialize_agg( min(location)), _timescaledb_functions.partialize_agg( sum(temperature)) , _timescaledb_functions.partialize_agg( sum(humidity)), _timescaledb_functions.partialize_agg(stddev(humidity)) -,_timescaledb_functions.partialize_agg( avg(temperature)) -,1 -from conditions -group by time_bucket('1week', timec) ; -SET ROLE :ROLE_DEFAULT_PERM_USER; - ---should have same results -- -select timec, minl, sumth, stddevh -from mat_m1 -order by timec, minl; - -select time_bucket('1week', timec) , -min(location), sum(temperature)+sum(humidity), stddev(humidity) -from conditions -group by time_bucket('1week', timec) -having min(location) >= 'NYC' and avg(temperature) > 20 and avg(lowp) > 10 -order by time_bucket('1week', timec), min(location); - ---check view defintion in information views -select view_name, view_definition from timescaledb_information.continuous_aggregates -where view_name::text like 'mat_m1'; - ---TEST6 -- select from internal view - -SET ROLE :ROLE_SUPERUSER; -insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" -select * from :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME"; -SET ROLE :ROLE_DEFAULT_PERM_USER; - ---lets drop the view and check -DROP MATERIALIZED VIEW mat_m1; - -drop table conditions; -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); - -select table_name from create_hypertable( 'conditions', 'timec'); - -insert into conditions -select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 55, 75, 40, 70, NULL; -insert into conditions -select generate_series('2018-11-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'NYC', 35, 45, 50, 40, NULL; -insert into conditions -select generate_series('2018-11-01 00:00'::timestamp, '2018-12-15 00:00'::timestamp, '1 
day'), 'LA', 73, 55, NULL, 28, NULL; - - -SELECT - $$ - select time_bucket('1week', timec) , - min(location) as col1, sum(temperature)+sum(humidity) as col2, stddev(humidity) as col3, min(allnull) as col4 - from conditions - group by time_bucket('1week', timec) - having min(location) >= 'NYC' and avg(temperature) > 20 - $$ AS "QUERY" -\gset - - -\set ECHO errors -\ir include/cont_agg_equal_deprecated.sql -\set ECHO all - -SELECT - $$ - select time_bucket('1week', timec), location, - sum(temperature)+sum(humidity) as col2, stddev(humidity) as col3, min(allnull) as col4 - from conditions - group by location, time_bucket('1week', timec) - $$ AS "QUERY" -\gset - -\set ECHO errors -\ir include/cont_agg_equal_deprecated.sql -\set ECHO all - ---TEST7 -- drop tests for view and hypertable ---DROP tests -\set ON_ERROR_STOP 0 -SELECT h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA", - direct_view_name as "DIR_VIEW_NAME", - direct_view_schema as "DIR_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_test' -\gset - -DROP TABLE :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME"; -DROP VIEW :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME"; -DROP VIEW :"DIR_VIEW_SCHEMA".:"DIR_VIEW_NAME"; -\set ON_ERROR_STOP 1 - ---catalog entry still there; -SELECT count(*) -FROM _timescaledb_catalog.continuous_agg ca -WHERE user_view_name = 'mat_test'; - ---mat table, user_view, direct view and partial view all there -select count(*) from pg_class where relname = :'PART_VIEW_NAME'; -select count(*) from pg_class where relname = :'MAT_TABLE_NAME'; -select count(*) from pg_class where relname = :'DIR_VIEW_NAME'; -select count(*) from pg_class where relname = 'mat_test'; - -DROP MATERIALIZED VIEW mat_test; - ---catalog entry should be gone -SELECT count(*) -FROM _timescaledb_catalog.continuous_agg ca -WHERE user_view_name = 'mat_test'; - ---mat table, user_view, direct view and partial view all gone -select count(*) from pg_class where relname = :'PART_VIEW_NAME'; -select count(*) from pg_class where relname = :'MAT_TABLE_NAME'; -select count(*) from pg_class where relname = :'DIR_VIEW_NAME'; -select count(*) from pg_class where relname = 'mat_test'; - - ---test dropping raw table -DROP TABLE conditions; -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); - -select table_name from create_hypertable( 'conditions', 'timec'); - ---no data in hyper table on purpose so that CASCADE is not required because of chunks - -CREATE MATERIALIZED VIEW mat_drop_test(timec, minl, sumt , sumh) -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -as -select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket('1day', timec) WITH NO DATA; - -\set ON_ERROR_STOP 0 -DROP TABLE conditions; -\set ON_ERROR_STOP 1 - ---insert data now - -insert into conditions -select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 55, 75, 40, 70, NULL; -insert into conditions -select generate_series('2018-11-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'NYC', 35, 45, 50, 40, NULL; -insert into conditions 
-select generate_series('2018-11-01 00:00'::timestamp, '2018-12-15 00:00'::timestamp, '1 day'), 'LA', 73, 55, NULL, 28, NULL; - - -SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", - h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_drop_test' -\gset - -SET client_min_messages TO NOTICE; -CALL refresh_continuous_aggregate('mat_drop_test', NULL, NULL); - ---force invalidation -insert into conditions -select generate_series('2017-11-01 00:00'::timestamp, '2017-12-15 00:00'::timestamp, '1 day'), 'LA', 73, 55, NULL, 28, NULL; - -select count(*) from _timescaledb_catalog.continuous_aggs_invalidation_threshold; -select count(*) from _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log; - -DROP TABLE conditions CASCADE; - ---catalog entry should be gone -SELECT count(*) -FROM _timescaledb_catalog.continuous_agg ca -WHERE user_view_name = 'mat_drop_test'; -select count(*) from _timescaledb_catalog.continuous_aggs_invalidation_threshold; -select count(*) from _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log; -select count(*) from _timescaledb_catalog.continuous_aggs_materialization_invalidation_log; - -SELECT * FROM _timescaledb_config.bgw_job; - ---mat table, user_view, and partial view all gone -select count(*) from pg_class where relname = :'PART_VIEW_NAME'; -select count(*) from pg_class where relname = :'MAT_TABLE_NAME'; -select count(*) from pg_class where relname = 'mat_drop_test'; - ---TEST With options - -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); - -select table_name from create_hypertable( 'conditions', 'timec'); - -CREATE MATERIALIZED VIEW mat_with_test(timec, minl, sumt , sumh) -WITH (timescaledb.continuous, - timescaledb.materialized_only=true, - timescaledb.finalized=false) -as -select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket('1day', timec), location, humidity, temperature WITH NO DATA; - -SELECT add_continuous_aggregate_policy('mat_with_test', NULL, '5 h'::interval, '12 h'::interval); -SELECT alter_job(id, schedule_interval => '1h') FROM _timescaledb_config.bgw_job; -SELECT schedule_interval FROM _timescaledb_config.bgw_job; - -SELECT alter_job(id, schedule_interval => '2h') FROM _timescaledb_config.bgw_job; -SELECT schedule_interval FROM _timescaledb_config.bgw_job; - -select indexname, indexdef from pg_indexes where tablename = -(SELECT h.table_name -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_with_test') -order by indexname; - -DROP MATERIALIZED VIEW mat_with_test; ---no additional indexes -CREATE MATERIALIZED VIEW mat_with_test(timec, minl, sumt , sumh) -WITH (timescaledb.continuous, - timescaledb.materialized_only=true, - timescaledb.create_group_indexes=false, - timescaledb.finalized=false) -as -select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket('1day', timec), location, humidity, temperature WITH NO DATA; - -select indexname, indexdef from 
pg_indexes where tablename = -(SELECT h.table_name -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'mat_with_test'); - -DROP TABLE conditions CASCADE; - ---test WITH using a hypertable with an integer time dimension -CREATE TABLE conditions ( - timec INT NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); - -select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); - -CREATE OR REPLACE FUNCTION integer_now_conditions() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions $$; -SELECT set_integer_now_func('conditions', 'integer_now_conditions'); - -CREATE MATERIALIZED VIEW mat_with_test(timec, minl, sumt , sumh) -WITH (timescaledb.continuous, - timescaledb.materialized_only=true, - timescaledb.finalized=false) -as -select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) -from conditions -group by time_bucket(100, timec) WITH NO DATA; - -SELECT add_continuous_aggregate_policy('mat_with_test', NULL, 500::integer, '12 h'::interval); -SELECT alter_job(id, schedule_interval => '2h') FROM _timescaledb_config.bgw_job; - -SELECT schedule_interval FROM _timescaledb_config.bgw_job; - -DROP TABLE conditions CASCADE; - - ---test space partitions -CREATE TABLE space_table ( - time BIGINT, - dev BIGINT, - data BIGINT -); - -SELECT create_hypertable( - 'space_table', - 'time', - chunk_time_interval => 10, - partitioning_column => 'dev', - number_partitions => 3); - -CREATE OR REPLACE FUNCTION integer_now_space_table() returns BIGINT LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), BIGINT '0') FROM space_table $$; -SELECT set_integer_now_func('space_table', 'integer_now_space_table'); - -CREATE MATERIALIZED VIEW space_view -WITH (timescaledb.continuous, - timescaledb.materialized_only=true, - timescaledb.finalized=false) -AS SELECT time_bucket('4', time), COUNT(data) - FROM space_table - GROUP BY 1 WITH NO DATA; - -INSERT INTO space_table VALUES - (0, 1, 1), (0, 2, 1), (1, 1, 1), (1, 2, 1), - (10, 1, 1), (10, 2, 1), (11, 1, 1), (11, 2, 1); - -SELECT h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA", - direct_view_name as "DIR_VIEW_NAME", - direct_view_schema as "DIR_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'space_view' -\gset - -SELECT * FROM :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" - ORDER BY time_bucket, chunk_id; - - -CALL refresh_continuous_aggregate('space_view', NULL, NULL); - -SELECT * FROM space_view ORDER BY 1; - -SELECT * FROM :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" - ORDER BY time_bucket, chunk_id; - - -INSERT INTO space_table VALUES (3, 2, 1); - -CALL refresh_continuous_aggregate('space_view', NULL, NULL); - -SELECT * FROM space_view ORDER BY 1; - -SELECT * FROM :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" - ORDER BY time_bucket, chunk_id; - - -INSERT INTO space_table VALUES (2, 3, 1); - -CALL refresh_continuous_aggregate('space_view', NULL, NULL); - -SELECT * FROM space_view ORDER BY 1; - -SELECT * FROM :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" - ORDER BY time_bucket, chunk_id; - - -DROP TABLE space_table CASCADE; - --- --- TEST FINALIZEFUNC_EXTRA --- - 
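-- Editorial sketch, not part of the original test (all names hypothetical):
-- FINALFUNC_EXTRA tells PostgreSQL to pass the final function one extra NULL
-- argument per aggregated input, after the transition state, which is how a
-- polymorphic final function such as fake_ffunc below can resolve anyelement.
CREATE FUNCTION sketch_sfunc(state int8, x anyelement) RETURNS int8
    AS $$ SELECT state + 1 $$ LANGUAGE SQL;
-- With FINALFUNC_EXTRA this is invoked as sketch_ffunc(state, NULL::input_type):
CREATE FUNCTION sketch_ffunc(state int8, x anyelement) RETURNS anyelement
    AS $$ SELECT x $$ LANGUAGE SQL;
CREATE AGGREGATE sketch_agg(anyelement) (
    SFUNC = sketch_sfunc,
    STYPE = int8,
    INITCOND = '0',
    FINALFUNC = sketch_ffunc,
    FINALFUNC_EXTRA
);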
--- create special aggregate to test ffunc_extra --- Raise warning with the actual type being passed in -CREATE OR REPLACE FUNCTION fake_ffunc(a int8, b int, c int, d int, x anyelement) -RETURNS anyelement AS $$ -BEGIN - RAISE WARNING 'type % %', pg_typeof(d), pg_typeof(x); - RETURN x; -END; -$$ -LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION fake_sfunc(a int8, b int, c int, d int, x anyelement) -RETURNS int8 AS $$ -BEGIN - RETURN b; -END; $$ -LANGUAGE plpgsql; - - -CREATE AGGREGATE aggregate_to_test_ffunc_extra(int, int, int, anyelement) ( - SFUNC = fake_sfunc, - STYPE = int8, - COMBINEFUNC = int8pl, - FINALFUNC = fake_ffunc, - PARALLEL = SAFE, - FINALFUNC_EXTRA -); - -CREATE TABLE conditions ( - timec INT NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL, - humidity DOUBLE PRECISION NULL, - lowp double precision NULL, - highp double precision null, - allnull double precision null - ); - -select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); - -CREATE OR REPLACE FUNCTION integer_now_conditions() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions $$; -SELECT set_integer_now_func('conditions', 'integer_now_conditions'); - -insert into conditions -select generate_series(0, 200, 10), 'POR', 55, 75, 40, 70, NULL; - - -CREATE MATERIALIZED VIEW mat_ffunc_test -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -as -select time_bucket(100, timec), aggregate_to_test_ffunc_extra(timec, 1, 3, 'test'::text) -from conditions -group by time_bucket(100, timec); - -SELECT * FROM mat_ffunc_test; - -DROP MATERIALIZED view mat_ffunc_test; - -CREATE MATERIALIZED VIEW mat_ffunc_test -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -as -select time_bucket(100, timec), aggregate_to_test_ffunc_extra(timec, 4, 5, bigint '123') -from conditions -group by time_bucket(100, timec); - -SELECT * FROM mat_ffunc_test; - ---refresh mat view test when time_bucket is not projected -- -DROP MATERIALIZED VIEW mat_ffunc_test; -CREATE MATERIALIZED VIEW mat_refresh_test -WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) -as -select location, max(humidity) -from conditions -group by time_bucket(100, timec), location WITH NO DATA; - -insert into conditions -select generate_series(0, 50, 10), 'NYC', 55, 75, 40, 70, NULL; - -CALL refresh_continuous_aggregate('mat_refresh_test', NULL, NULL); -SELECT * FROM mat_refresh_test order by 1,2 ; - --- test for bug when group by is not in project list -CREATE MATERIALIZED VIEW conditions_grpby_view with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) as -select time_bucket(100, timec), sum(humidity) -from conditions -group by time_bucket(100, timec), location; -select * from conditions_grpby_view order by 1, 2; - -CREATE MATERIALIZED VIEW conditions_grpby_view2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) as -select time_bucket(100, timec), sum(humidity) -from conditions -group by time_bucket(100, timec), location -having avg(temperature) > 0; - -select * from conditions_grpby_view2 order by 1, 2; - --- Test internal functions for continuous aggregates -SELECT test.continuous_aggs_find_view('mat_refresh_test'); - --- Test pseudotype/enum handling -CREATE TYPE status_enum AS ENUM ( - 'red', - 'yellow', - 'green' -); - -CREATE TABLE cagg_types ( - time TIMESTAMPTZ NOT NULL, 
- status status_enum, - names NAME[], - floats FLOAT[] -); - -SELECT - table_name -FROM - create_hypertable('cagg_types', 'time'); - -INSERT INTO cagg_types -SELECT - '2000-01-01', - 'yellow', - '{foo,bar,baz}', - '{1,2.5,3}'; - -CREATE MATERIALIZED VIEW mat_types WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1d', time), - min(status) AS status, - max(names) AS names, - min(floats) AS floats -FROM - cagg_types -GROUP BY - 1; - -CALL refresh_continuous_aggregate('mat_types',NULL,NULL); -SELECT * FROM mat_types; - ------------------------------------------------------------------------------------- --- Test issue #2616 where cagg view contains an expression with several aggregates in it - -CREATE TABLE water_consumption -( - sensor_id integer NOT NULL, - timestamp timestamp(0) NOT NULL, - water_index integer -); - -SELECT create_hypertable('water_consumption', 'timestamp', 'sensor_id', 2); - -INSERT INTO public.water_consumption (sensor_id, timestamp, water_index) VALUES - (1, '2010-11-03 09:42:30', 1030), - (1, '2010-11-03 09:42:40', 1032), - (1, '2010-11-03 09:42:50', 1035), - (1, '2010-11-03 09:43:30', 1040), - (1, '2010-11-03 09:43:40', 1045), - (1, '2010-11-03 09:43:50', 1050), - (1, '2010-11-03 09:44:30', 1052), - (1, '2010-11-03 09:44:40', 1057), - (1, '2010-11-03 09:44:50', 1060), - (1, '2010-11-03 09:45:30', 1063), - (1, '2010-11-03 09:45:40', 1067), - (1, '2010-11-03 09:45:50', 1070); - --- The test with the view originally reported in the issue. -CREATE MATERIALIZED VIEW water_consumption_aggregation_minute - WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE, timescaledb.finalized=false) -AS -SELECT sensor_id, - time_bucket(INTERVAL '1 minute', timestamp) + '1 minute' AS timestamp, - (max(water_index) - min(water_index)) AS water_consumption -FROM water_consumption -GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) -WITH NO DATA; - -CALL refresh_continuous_aggregate('water_consumption_aggregation_minute', NULL, NULL); - --- The results of the view and the query over hypertable should be the same -SELECT * FROM water_consumption_aggregation_minute ORDER BY water_consumption; -SELECT sensor_id, - time_bucket(INTERVAL '1 minute', timestamp) + '1 minute' AS timestamp, - (max(water_index) - min(water_index)) AS water_consumption -FROM water_consumption -GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) -ORDER BY water_consumption; - --- Simplified test, where the view doesn't contain all group by clauses -CREATE MATERIALIZED VIEW water_consumption_no_select_bucket - WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE, timescaledb.finalized=false) -AS -SELECT sensor_id, - (max(water_index) - min(water_index)) AS water_consumption -FROM water_consumption -GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) -WITH NO DATA; - -CALL refresh_continuous_aggregate('water_consumption_no_select_bucket', NULL, NULL); - --- The results of the view and the query over hypertable should be the same -SELECT * FROM water_consumption_no_select_bucket ORDER BY water_consumption; -SELECT sensor_id, - (max(water_index) - min(water_index)) AS water_consumption -FROM water_consumption -GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) -ORDER BY water_consumption; - --- The test with SELECT matching GROUP BY and placing the aggregate expression not last -CREATE MATERIALIZED VIEW water_consumption_aggregation_no_addition - WITH (timescaledb.continuous, 
timescaledb.materialized_only = TRUE, timescaledb.finalized=false) -AS -SELECT sensor_id, - (max(water_index) - min(water_index)) AS water_consumption, - time_bucket(INTERVAL '1 minute', timestamp) AS timestamp -FROM water_consumption -GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) -WITH NO DATA; - -CALL refresh_continuous_aggregate('water_consumption_aggregation_no_addition', NULL, NULL); - --- The results of the view and the query over hypertable should be the same -SELECT * FROM water_consumption_aggregation_no_addition ORDER BY water_consumption; -SELECT sensor_id, - (max(water_index) - min(water_index)) AS water_consumption, - time_bucket(INTERVAL '1 minute', timestamp) AS timestamp -FROM water_consumption -GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) -ORDER BY water_consumption; - -DROP TABLE water_consumption CASCADE; - ----- ---- github issue 2655 --- -create table raw_data(time timestamptz, search_query text, cnt integer, cnt2 integer); -select create_hypertable('raw_data','time', chunk_time_interval=>'15 days'::interval); -insert into raw_data select '2000-01-01','Q1'; - ---having has exprs that appear in select -CREATE MATERIALIZED VIEW search_query_count_1m WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) -AS - SELECT search_query,count(search_query) as count, - time_bucket(INTERVAL '1 minute', time) AS bucket - FROM raw_data - WHERE search_query is not null AND LENGTH(TRIM(both from search_query))>0 - GROUP BY search_query, bucket HAVING count(search_query) > 3 OR sum(cnt) > 1; - ---having has aggregates + grp by columns that appear in select -CREATE MATERIALIZED VIEW search_query_count_2 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) -AS - SELECT search_query,count(search_query) as count, sum(cnt), - time_bucket(INTERVAL '1 minute', time) AS bucket - FROM raw_data - WHERE search_query is not null AND LENGTH(TRIM(both from search_query))>0 - GROUP BY search_query, bucket -HAVING count(search_query) > 3 OR sum(cnt) > 1 OR - ( sum(cnt) + count(cnt)) > 1 - AND search_query = 'Q1'; - -CREATE MATERIALIZED VIEW search_query_count_3 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) -AS - SELECT search_query,count(search_query) as count, sum(cnt), - time_bucket(INTERVAL '1 minute', time) AS bucket - FROM raw_data - WHERE search_query is not null AND LENGTH(TRIM(both from search_query))>0 - GROUP BY cnt +cnt2 , bucket, search_query - HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; - -insert into raw_data select '2000-01-01 00:00+0','Q1', 1, 100; -insert into raw_data select '2000-01-01 00:00+0','Q1', 2, 200; -insert into raw_data select '2000-01-01 00:00+0','Q1', 3, 300; -insert into raw_data select '2000-01-02 00:00+0','Q2', 10, 10; -insert into raw_data select '2000-01-02 00:00+0','Q2', 20, 20; - -CALL refresh_continuous_aggregate('search_query_count_1m', NULL, NULL); -SELECT * FROM search_query_count_1m ORDER BY 1, 2; - ---only 1 of these should appear in the result -insert into raw_data select '2000-01-02 00:00+0','Q3', 0, 0; -insert into raw_data select '2000-01-03 00:00+0','Q4', 20, 20; - -CALL refresh_continuous_aggregate('search_query_count_1m', NULL, NULL); -SELECT * FROM search_query_count_1m ORDER BY 1, 2; - ---refresh search_query_count_2--- -CALL refresh_continuous_aggregate('search_query_count_2', NULL, NULL); -SELECT * FROM search_query_count_2 ORDER BY 1, 2; - ---refresh 
search_query_count_3--- -CALL refresh_continuous_aggregate('search_query_count_3', NULL, NULL); -SELECT * FROM search_query_count_3 ORDER BY 1, 2, 3; - ---- TEST enable compression on continuous aggregates -CREATE VIEW cagg_compression_status as -SELECT ca.mat_hypertable_id AS mat_htid, - ca.user_view_name AS cagg_name , - h.schema_name AS mat_schema_name, - h.table_name AS mat_table_name, - ca.materialized_only -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -; -SELECT mat_htid AS "MAT_HTID" - , mat_schema_name || '.' || mat_table_name AS "MAT_HTNAME" - , mat_table_name AS "MAT_TABLE_NAME" -FROM cagg_compression_status -WHERE cagg_name = 'search_query_count_3' \gset - -ALTER MATERIALIZED VIEW search_query_count_3 SET (timescaledb.compress = 'true'); -SELECT cagg_name, mat_table_name -FROM cagg_compression_status where cagg_name = 'search_query_count_3'; -\x -SELECT * FROM timescaledb_information.compression_settings -WHERE hypertable_name = :'MAT_TABLE_NAME'; -\x - -SELECT compress_chunk(ch) -FROM show_chunks('search_query_count_3') ch; - -SELECT * from search_query_count_3 ORDER BY 1, 2, 3; - --- insert into a new region of the hypertable and then refresh the cagg --- (note we still do not support refreshes into existing regions. --- cagg chunks do not map 1-1 to hypertable regions. They encompass --- more data --- ). -insert into raw_data select '2000-05-01 00:00+0','Q3', 0, 0; - ---this one fails now -\set ON_ERROR_STOP 0 -CALL refresh_continuous_aggregate('search_query_count_3', NULL, '2000-06-01 00:00+0'::timestamptz); -CALL refresh_continuous_aggregate('search_query_count_3', '2000-05-01 00:00+0'::timestamptz, '2000-06-01 00:00+0'::timestamptz); -\set ON_ERROR_STOP 1 - ---insert row -insert into raw_data select '2001-05-10 00:00+0','Q3', 100, 100; - ---this should succeed since it does not refresh any compressed regions in the cagg -CALL refresh_continuous_aggregate('search_query_count_3', '2001-05-01 00:00+0'::timestamptz, '2001-06-01 00:00+0'::timestamptz); - ---verify watermark and check that chunks are compressed -SELECT _timescaledb_functions.to_timestamp(w) FROM _timescaledb_functions.cagg_watermark(:'MAT_HTID') w; - -SELECT chunk_name, range_start, range_end, is_compressed -FROM timescaledb_information.chunks -WHERE hypertable_name = :'MAT_TABLE_NAME' -ORDER BY 1; - -SELECT * FROM _timescaledb_catalog.continuous_aggs_materialization_invalidation_log -WHERE materialization_id = :'MAT_HTID' ORDER BY 1, 2,3; - -SELECT * from search_query_count_3 -WHERE bucket > '2001-01-01' -ORDER BY 1, 2, 3; - ---now disable compression, will error out -- -\set ON_ERROR_STOP 0 -ALTER MATERIALIZED VIEW search_query_count_3 SET (timescaledb.compress = 'false'); -\set ON_ERROR_STOP 1 - -SELECT decompress_chunk(format('%I.%I', schema_name, table_name)) -FROM _timescaledb_catalog.chunk -WHERE hypertable_id = :'MAT_HTID' and status = 1; - -SELECT cagg_name, mat_table_name -FROM cagg_compression_status where cagg_name = 'search_query_count_3'; -SELECT view_name, materialized_only, compression_enabled -FROM timescaledb_information.continuous_aggregates -where view_name = 'search_query_count_3'; - --- TEST caggs on table with more columns than in the cagg view defn -- -CREATE TABLE test_morecols ( time TIMESTAMPTZ NOT NULL, - val1 INTEGER, val2 INTEGER, val3 INTEGER, val4 INTEGER, - val5 INTEGER, val6 INTEGER, val7 INTEGER, val8 INTEGER); -SELECT create_hypertable('test_morecols', 'time', chunk_time_interval=> '7 
--- TEST caggs on table with more columns than in the cagg view defn --
-CREATE TABLE test_morecols ( time TIMESTAMPTZ NOT NULL,
-       val1 INTEGER, val2 INTEGER, val3 INTEGER, val4 INTEGER,
-       val5 INTEGER, val6 INTEGER, val7 INTEGER, val8 INTEGER);
-SELECT create_hypertable('test_morecols', 'time', chunk_time_interval=> '7 days'::interval);
-INSERT INTO test_morecols
-SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 55, 75, 40, 70, NULL, 100, 200, 200;
-
-CREATE MATERIALIZED VIEW test_morecols_cagg with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false)
-AS SELECT time_bucket('30 days', time), avg(val1), count(val2)
-   FROM test_morecols GROUP BY 1;
-
-ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.compress='true');
-
-SELECT compress_chunk(ch) FROM show_chunks('test_morecols_cagg') ch;
-
-SELECT * FROM test_morecols_cagg;
-
-SELECT view_name, materialized_only, compression_enabled
-FROM timescaledb_information.continuous_aggregates
-where view_name = 'test_morecols_cagg';
-
--- should keep compressed option, modify only materialized --
-ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.materialized_only='true');
-
-SELECT view_name, materialized_only, compression_enabled
-FROM timescaledb_information.continuous_aggregates
-where view_name = 'test_morecols_cagg';
-
-CREATE TABLE issue3248(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float);
-CREATE INDEX ON issue3248(time DESC);
-CREATE INDEX ON issue3248(device_id, time DESC);
-SELECT create_hypertable('issue3248','time',create_default_indexes:=false);
-
-ALTER TABLE issue3248 DROP COLUMN filler_1;
-INSERT INTO issue3248(time,device_id,v0,v1,v2,v3)
-SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL
-FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','8h') gtime(time),
-     generate_series(1,5,1) gdevice(device_id);
-ALTER TABLE issue3248 DROP COLUMN filler_2;
-INSERT INTO issue3248(time,device_id,v0,v1,v2,v3)
-SELECT time, device_id, device_id-1, device_id + 2, device_id + 0.5, NULL
-FROM generate_series('2000-01-06 0:00:00+0'::timestamptz,'2000-01-12 23:55:00+0','8h') gtime(time),
-     generate_series(1,5,1) gdevice(device_id);
-ALTER TABLE issue3248 DROP COLUMN filler_3;
-INSERT INTO issue3248(time,device_id,v0,v1,v2,v3)
-SELECT time, device_id, device_id, device_id + 2, device_id + 0.5, NULL
-FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','8h') gtime(time),
-     generate_series(1,5,1) gdevice(device_id);
-ANALYZE issue3248;
-
-CREATE materialized view issue3248_cagg WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false)
-AS SELECT time_bucket('1h', time), device_id, min(v0), max(v1), avg(v2)
-FROM issue3248 GROUP BY 1, 2;
-
-SELECT
-  FROM issue3248 AS m,
-  LATERAL(SELECT m FROM issue3248_cagg WHERE avg IS NULL LIMIT 1) AS lat;
-
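-- Why the dropped filler columns matter (a sketch using only standard
-- PostgreSQL catalogs): the three INSERT batches above land in chunks
-- created before and after the DROP COLUMN statements, so the chunks can
-- differ in physical tuple layout, which the cagg machinery has to
-- tolerate. The dropped attributes remain visible in pg_attribute:
SELECT a.attnum, a.attname, a.attisdropped
FROM pg_attribute a
WHERE a.attrelid = 'issue3248'::regclass AND a.attnum > 0
ORDER BY a.attnum;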
--- test that option create_group_indexes is taken into account
-CREATE TABLE test_group_idx (
-  time timestamptz,
-  symbol int,
-  value numeric
-);
-
-select create_hypertable('test_group_idx', 'time');
-
-insert into test_group_idx
-select t, round(random()*10), random()*5
-from generate_series('2020-01-01', '2020-02-25', INTERVAL '12 hours') t;
-
-create materialized view cagg_index_true
-with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.create_group_indexes=true, timescaledb.finalized=false) as
-select
-  time_bucket('1 day', "time") as bucket,
-  sum(value),
-  symbol
-from test_group_idx
-group by bucket, symbol;
-
-create materialized view cagg_index_false
-with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.create_group_indexes=false, timescaledb.finalized=false) as
-select
-  time_bucket('1 day', "time") as bucket,
-  sum(value),
-  symbol
-from test_group_idx
-group by bucket, symbol;
-
-create materialized view cagg_index_default
-with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) as
-select
-  time_bucket('1 day', "time") as bucket,
-  sum(value),
-  symbol
-from test_group_idx
-group by bucket, symbol;
-
--- see corresponding materialization hypertables
-select view_name, materialization_hypertable_name from timescaledb_information.continuous_aggregates ca
-where view_name like 'cagg_index_%';
-
--- now make sure a group index has been created when explicitly asked for
-\x on
-select i.*
-from pg_indexes i
-join pg_class c
-  on schemaname = relnamespace::regnamespace::text
- and tablename = relname
-where tablename in (select materialization_hypertable_name from timescaledb_information.continuous_aggregates
-                    where view_name like 'cagg_index_%')
-order by tablename;
-\x off
-
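-- Expected shape of the check above (a sketch, assuming the documented
-- default of create_group_indexes=true): only the materialization
-- hypertables behind cagg_index_true and cagg_index_default should carry an
-- index on the GROUP BY column in addition to the bucket index. A compact
-- way to eyeball this:
SELECT tablename, count(*) AS index_count
FROM pg_indexes
WHERE tablename IN (SELECT materialization_hypertable_name
                    FROM timescaledb_information.continuous_aggregates
                    WHERE view_name LIKE 'cagg_index_%')
GROUP BY tablename
ORDER BY tablename;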
-\d+ "_timescaledb_internal".:"DIRECT_VIEW_NAME" -\d+ "_timescaledb_internal".:"PART_VIEW_NAME" -\d+ "_timescaledb_internal".:"MAT_TABLE_NAME" -\d+ 'cashflows' - -SELECT * FROM cashflows ORDER BY bucket, amount, cashflow, cashflow2; - --- Indexes on not finalized caggs are not allowed -\set ON_ERROR_STOP 0 -CREATE INDEX index_on_not_finalized_cagg ON cashflows(cashflow); -\set ON_ERROR_STOP 1 diff --git a/tsl/test/sql/include/cagg_migrate_common.sql b/tsl/test/sql/include/cagg_migrate_common.sql index 51b71c0d25c..bdbbb9f374e 100644 --- a/tsl/test/sql/include/cagg_migrate_common.sql +++ b/tsl/test/sql/include/cagg_migrate_common.sql @@ -2,32 +2,22 @@ -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +-- Setup some variables +SELECT + format('\! zcat include/data/cagg_migrate_%1$s.sql.gz > %2$s/results/cagg_migrate_%1$s.sql', lower(:'TIME_DIMENSION_DATATYPE'), :'TEST_OUTPUT_DIR') AS "ZCAT_CMD", + format('%2$s/results/cagg_migrate_%1$s.sql', lower(:'TIME_DIMENSION_DATATYPE'), :'TEST_OUTPUT_DIR') AS "TEST_SCHEMA_FILE" +\gset -\if :IS_DISTRIBUTED -\echo 'Running distributed hypertable tests' -\else -\echo 'Running local hypertable tests' -\endif +-- decompress dump file +:ZCAT_CMD -CREATE TABLE conditions ( - "time" :TIME_DIMENSION_DATATYPE NOT NULL, - temperature NUMERIC -); - -\if :IS_DISTRIBUTED - \if :IS_TIME_DIMENSION - SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); - \else - SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); - \endif -\else - \if :IS_TIME_DIMENSION - SELECT table_name FROM create_hypertable('conditions', 'time'); - \else - SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10); - \endif -\endif +-- restore dump +SELECT timescaledb_pre_restore(); +\ir :TEST_SCHEMA_FILE +SELECT timescaledb_post_restore(); + +-- Make sure no scheduled job will be executed during the regression tests +SELECT _timescaledb_functions.stop_background_workers(); \if :IS_TIME_DIMENSION INSERT INTO conditions ("time", temperature) @@ -42,85 +32,24 @@ CREATE TABLE conditions ( FROM public.conditions $$; - \if :IS_DISTRIBUTED - SELECT - 'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM public.conditions $$;' AS "STMT" - \gset - CALL distributed_exec (:'STMT'); - \endif - - SELECT set_integer_now_func('conditions', 'integer_now'); - INSERT INTO conditions ("time", temperature) SELECT generate_series(1, 1000, 1), 0.25; \endif --- new cagg format (finalized=true) -CREATE MATERIALIZED VIEW conditions_summary_daily_new -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT -\if :IS_TIME_DIMENSION - time_bucket(INTERVAL '1 day', "time") AS bucket, -\else - time_bucket(INTEGER '24', "time") AS bucket, -\endif - MIN(temperature), - MAX(temperature), - AVG(temperature), - SUM(temperature) -FROM - conditions -GROUP BY - bucket -WITH NO DATA; - --- older continuous aggregate to be migrated -CREATE MATERIALIZED VIEW conditions_summary_daily -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT -\if :IS_TIME_DIMENSION - time_bucket(INTERVAL '1 day', "time") AS bucket, -\else - time_bucket(INTEGER '24', "time") AS bucket, -\endif - MIN(temperature), - MAX(temperature), - 
diff --git a/tsl/test/sql/include/cagg_migrate_common.sql b/tsl/test/sql/include/cagg_migrate_common.sql
index 51b71c0d25c..bdbbb9f374e 100644
--- a/tsl/test/sql/include/cagg_migrate_common.sql
+++ b/tsl/test/sql/include/cagg_migrate_common.sql
@@ -2,32 +2,22 @@
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
 
-\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
+-- Setup some variables
+SELECT
+    format('\! zcat include/data/cagg_migrate_%1$s.sql.gz > %2$s/results/cagg_migrate_%1$s.sql', lower(:'TIME_DIMENSION_DATATYPE'), :'TEST_OUTPUT_DIR') AS "ZCAT_CMD",
+    format('%2$s/results/cagg_migrate_%1$s.sql', lower(:'TIME_DIMENSION_DATATYPE'), :'TEST_OUTPUT_DIR') AS "TEST_SCHEMA_FILE"
+\gset
 
-\if :IS_DISTRIBUTED
-\echo 'Running distributed hypertable tests'
-\else
-\echo 'Running local hypertable tests'
-\endif
+-- decompress dump file
+:ZCAT_CMD
 
-CREATE TABLE conditions (
-    "time" :TIME_DIMENSION_DATATYPE NOT NULL,
-    temperature NUMERIC
-);
-
-\if :IS_DISTRIBUTED
-    \if :IS_TIME_DIMENSION
-        SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2);
-    \else
-        SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2);
-    \endif
-\else
-    \if :IS_TIME_DIMENSION
-        SELECT table_name FROM create_hypertable('conditions', 'time');
-    \else
-        SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10);
-    \endif
-\endif
+-- restore dump
+SELECT timescaledb_pre_restore();
+\ir :TEST_SCHEMA_FILE
+SELECT timescaledb_post_restore();
+
+-- Make sure no scheduled job will be executed during the regression tests
+SELECT _timescaledb_functions.stop_background_workers();
 
 \if :IS_TIME_DIMENSION
 INSERT INTO conditions ("time", temperature)
@@ -42,85 +32,24 @@ CREATE TABLE conditions (
     FROM public.conditions
     $$;
 
-    \if :IS_DISTRIBUTED
-        SELECT
-            'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM public.conditions $$;' AS "STMT"
-        \gset
-        CALL distributed_exec (:'STMT');
-    \endif
-
-    SELECT set_integer_now_func('conditions', 'integer_now');
-
     INSERT INTO conditions ("time", temperature)
     SELECT generate_series(1, 1000, 1), 0.25;
 \endif
 
--- new cagg format (finalized=true)
-CREATE MATERIALIZED VIEW conditions_summary_daily_new
-WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
-SELECT
-\if :IS_TIME_DIMENSION
-    time_bucket(INTERVAL '1 day', "time") AS bucket,
-\else
-    time_bucket(INTEGER '24', "time") AS bucket,
-\endif
-    MIN(temperature),
-    MAX(temperature),
-    AVG(temperature),
-    SUM(temperature)
-FROM
-    conditions
-GROUP BY
-    bucket
-WITH NO DATA;
-
--- older continuous aggregate to be migrated
-CREATE MATERIALIZED VIEW conditions_summary_daily
-WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS
-SELECT
-\if :IS_TIME_DIMENSION
-    time_bucket(INTERVAL '1 day', "time") AS bucket,
-\else
-    time_bucket(INTEGER '24', "time") AS bucket,
-\endif
-    MIN(temperature),
-    MAX(temperature),
-    AVG(temperature),
-    SUM(temperature)
-FROM
-    conditions
-GROUP BY
-    bucket;
-
--- for permission tests
-CREATE MATERIALIZED VIEW conditions_summary_weekly
-WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS
-SELECT
-\if :IS_TIME_DIMENSION
-    time_bucket(INTERVAL '1 week', "time") AS bucket,
-\else
-    time_bucket(INTEGER '168', "time") AS bucket,
-\endif
-    MIN(temperature),
-    MAX(temperature),
-    AVG(temperature),
-    SUM(temperature)
-FROM
-    conditions
-GROUP BY
-    bucket;
+CALL refresh_continuous_aggregate('conditions_summary_daily', NULL, NULL);
+CALL refresh_continuous_aggregate('conditions_summary_weekly', NULL, NULL);
 
 \set ON_ERROR_STOP 0
 -- should fail because we don't need to migrate finalized caggs
 CALL cagg_migrate('conditions_summary_daily_new');
-\set ON_ERROR_STOP 1
-
-\set ON_ERROR_STOP 0
 -- should fail relation does not exist
 CALL cagg_migrate('conditions_summary_not_cagg');
+
 CREATE TABLE conditions_summary_not_cagg();
+
 -- should fail continuous agg does not exist
 CALL cagg_migrate('conditions_summary_not_cagg');
 \set ON_ERROR_STOP 1
@@ -376,3 +305,5 @@ TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE
 DROP MATERIALIZED VIEW conditions_summary_daily;
 DROP MATERIALIZED VIEW conditions_summary_weekly;
 DROP TABLE conditions;
+
+SELECT _timescaledb_functions.start_background_workers();
diff --git a/tsl/test/sql/include/data/cagg_migrate_integer.sql.gz b/tsl/test/sql/include/data/cagg_migrate_integer.sql.gz
new file mode 100644
index 00000000000..9ff440c59b8
Binary files /dev/null and b/tsl/test/sql/include/data/cagg_migrate_integer.sql.gz differ
diff --git a/tsl/test/sql/include/data/cagg_migrate_timestamp.sql.gz b/tsl/test/sql/include/data/cagg_migrate_timestamp.sql.gz
new file mode 100644
index 00000000000..4dfb57f0a3b
Binary files /dev/null and b/tsl/test/sql/include/data/cagg_migrate_timestamp.sql.gz differ
diff --git a/tsl/test/sql/include/data/cagg_migrate_timestamptz.sql.gz b/tsl/test/sql/include/data/cagg_migrate_timestamptz.sql.gz
new file mode 100644
index 00000000000..2f16dc60f46
Binary files /dev/null and b/tsl/test/sql/include/data/cagg_migrate_timestamptz.sql.gz differ
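-- For context, a minimal sketch of the migration entry point the test above
-- exercises (view names come from the test; the keyword arguments follow the
-- documented cagg_migrate API, hedged here since versions may differ):
CALL cagg_migrate('conditions_summary_daily');
-- or, replacing the old cagg and dropping it once the copy is validated:
CALL cagg_migrate('conditions_summary_daily', override => true, drop_old => true);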
diff --git a/tsl/test/sql/telemetry_stats.sql.in b/tsl/test/sql/telemetry_stats.sql.in
index 0b7e35776dd..47fde5eb613 100644
--- a/tsl/test/sql/telemetry_stats.sql.in
+++ b/tsl/test/sql/telemetry_stats.sql.in
@@ -56,16 +56,6 @@ FROM
     hyper
 GROUP BY hour, device;
 
-CREATE MATERIALIZED VIEW contagg_old
-WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS
-SELECT
-    time_bucket('1 hour', time) AS hour,
-    device,
-    min(time)
-FROM
-    hyper
-GROUP BY hour, device;
-
 -- Create another view (already have the "relations" view)
 CREATE VIEW devices AS
 SELECT DISTINCT ON (device) device
@@ -86,14 +76,12 @@ INSERT INTO part SELECT * FROM normal;
 
 CALL refresh_continuous_aggregate('contagg', NULL, NULL);
-CALL refresh_continuous_aggregate('contagg_old', NULL, NULL);
 
 -- ANALYZE to get updated reltuples stats
 ANALYZE normal, hyper, part;
 
 SELECT count(c) FROM show_chunks('hyper') c;
 SELECT count(c) FROM show_chunks('contagg') c;
-SELECT count(c) FROM show_chunks('contagg_old') c;
 
 -- Update and show the telemetry report
 REFRESH MATERIALIZED VIEW telemetry_report;
@@ -142,7 +130,6 @@ select add_job('job_test_fixed', '1 week');
 select add_job('job_test_drifting', '1 week', fixed_schedule => false);
 -- add continuous aggregate refresh policy for contagg
 select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting
-select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed
 -- add retention policy, fixed
 select add_retention_policy('hyper', interval '1 year', initial_start => now());
 -- add compression policy