From 2f1dd4a13795a2734a413291cc1205d6e157d520 Mon Sep 17 00:00:00 2001 From: Nikhil Sontakke Date: Mon, 26 Feb 2024 16:44:26 +0530 Subject: [PATCH] Add support for correlated constraints Allow users to specify a specific column to be used as a correlated constraint using the add_dimension() API. We store such correlated constraints in the dimensions related timescaledb catalog tables. The "dimension" catalog table has been amended by adding a "type" column. This column now explicitly stores the type: open, closed or correlated as appropriate. We create dimension_slice and chunk_constraint entries for chunks which have correlated constraints on them. The dimension slice entry will have -inf/+inf as start/end range initially for a given correlated constraint and the chunk_constraint entry will refer back to this slice entry. This start/end range will be refreshed later. One of the entry points is during compression for now. We can thus store the min/max values for such correlated constraints in these catalog tables at the per-chunk level. Note that correlated constraints do not participate in partitioning of the data. Such a correlated constraint will be used for chunk pruning if the WHERE clause of a SQL query specifies ranges on such a column. 
--- sql/ddl_api.sql | 3 +- sql/pre_install/tables.sql | 3 +- sql/updates/latest-dev.sql | 127 +++++++++ sql/updates/reverse-dev.sql | 100 +++++++ src/chunk.c | 20 +- src/chunk_adaptive.c | 29 +- src/chunk_adaptive.h | 2 + src/chunk_constraint.c | 83 +++++- src/chunk_constraint.h | 23 +- src/dimension.c | 197 ++++++++++++-- src/dimension.h | 18 +- src/dimension_slice.c | 256 ++++++++++++++++++ src/dimension_slice.h | 6 + src/hypertable.c | 2 + src/hypertable.h | 1 + src/hypertable_restrict_info.c | 18 +- src/process_utility.c | 2 +- src/ts_catalog/catalog.h | 2 + test/expected/chunk_adaptive.out | 6 +- test/expected/create_chunks.out | 8 +- test/expected/create_hypertable.out | 54 ++-- test/expected/ddl-13.out | 16 +- test/expected/ddl-14.out | 16 +- test/expected/ddl-15.out | 16 +- test/expected/ddl-16.out | 16 +- test/expected/drop_hypertable.out | 16 +- test/expected/drop_owned.out | 4 +- test/expected/drop_schema.out | 4 +- test/expected/partition.out | 42 +-- tsl/src/compression/api.c | 16 ++ tsl/src/compression/create.c | 1 + tsl/test/expected/bgw_policy.out | 16 +- tsl/test/expected/cagg_migrate.out | 10 +- tsl/test/expected/correlated_constraints.out | 227 ++++++++++++++++ tsl/test/expected/tsl_tables.out | 16 +- tsl/test/shared/expected/extension.out | 2 +- tsl/test/sql/CMakeLists.txt | 2 +- tsl/test/sql/correlated_constraints.sql | 142 ++++++++++ .../include/data/cagg_migrate_integer.sql.gz | Bin 1650 -> 1656 bytes .../data/cagg_migrate_timestamp.sql.gz | Bin 1710 -> 1715 bytes .../data/cagg_migrate_timestamptz.sql.gz | Bin 1702 -> 1707 bytes 41 files changed, 1349 insertions(+), 173 deletions(-) create mode 100644 tsl/test/expected/correlated_constraints.out create mode 100644 tsl/test/sql/correlated_constraints.sql diff --git a/sql/ddl_api.sql b/sql/ddl_api.sql index 1a37b3f7032..9701cd962a4 100644 --- a/sql/ddl_api.sql +++ b/sql/ddl_api.sql @@ -133,7 +133,8 @@ CREATE OR REPLACE FUNCTION @extschema@.add_dimension( number_partitions INTEGER = NULL, 
chunk_time_interval ANYELEMENT = NULL::BIGINT, partitioning_func REGPROC = NULL, - if_not_exists BOOLEAN = FALSE + if_not_exists BOOLEAN = FALSE, + correlated BOOLEAN = FALSE ) RETURNS TABLE(dimension_id INT, schema_name NAME, table_name NAME, column_name NAME, created BOOL) AS '@MODULE_PATHNAME@', 'ts_dimension_add' LANGUAGE C VOLATILE; diff --git a/sql/pre_install/tables.sql b/sql/pre_install/tables.sql index e226ffe8a06..235d11463a6 100644 --- a/sql/pre_install/tables.sql +++ b/sql/pre_install/tables.sql @@ -100,13 +100,14 @@ CREATE TABLE _timescaledb_catalog.dimension ( compress_interval_length bigint NULL, integer_now_func_schema name NULL, integer_now_func name NULL, + type "char", -- table constraints CONSTRAINT dimension_pkey PRIMARY KEY (id), CONSTRAINT dimension_hypertable_id_column_name_key UNIQUE (hypertable_id, column_name), CONSTRAINT dimension_check CHECK ((partitioning_func_schema IS NULL AND partitioning_func IS NULL) OR (partitioning_func_schema IS NOT NULL AND partitioning_func IS NOT NULL)), CONSTRAINT dimension_check1 CHECK ((num_slices IS NULL AND interval_length IS NOT NULL) OR (num_slices IS NOT NULL AND interval_length IS NULL)), CONSTRAINT dimension_check2 CHECK ((integer_now_func_schema IS NULL AND integer_now_func IS NULL) OR (integer_now_func_schema IS NOT NULL AND integer_now_func IS NOT NULL)), - CONSTRAINT dimension_interval_length_check CHECK (interval_length IS NULL OR interval_length > 0), + CONSTRAINT dimension_interval_length_check CHECK (interval_length IS NULL OR interval_length > 0 OR type = 'C'), CONSTRAINT dimension_compress_interval_length_check CHECK (compress_interval_length IS NULL OR compress_interval_length > 0), CONSTRAINT dimension_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE ); diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql index 3b8ac88e728..05922b9c5b5 100644 --- a/sql/updates/latest-dev.sql +++ b/sql/updates/latest-dev.sql 
@@ -392,3 +392,130 @@ DROP FUNCTION IF EXISTS _timescaledb_functions.policy_job_error_retention_check( -- -- END bgw_job_stat_history -- + +DROP FUNCTION IF EXISTS @extschema@.add_dimension( + REGCLASS, + NAME, + INTEGER, + ANYELEMENT, + REGPROC, + BOOLEAN +); + +CREATE FUNCTION @extschema@.add_dimension( + hypertable REGCLASS, + column_name NAME, + number_partitions INTEGER = NULL, + chunk_time_interval ANYELEMENT = NULL::BIGINT, + partitioning_func REGPROC = NULL, + if_not_exists BOOLEAN = FALSE, + correlated BOOLEAN = FALSE +) RETURNS TABLE(dimension_id INT, schema_name NAME, table_name NAME, column_name NAME, created BOOL) +AS '@MODULE_PATHNAME@', 'ts_dimension_add' LANGUAGE C VOLATILE; + + +-- +-- Rebuild the catalog table `_timescaledb_catalog.dimension with type column +-- + +CREATE TABLE _timescaledb_internal._tmp_dimension +AS SELECT * from _timescaledb_catalog.dimension; + +CREATE TABLE _timescaledb_internal.tmp_dimension_seq_value AS +SELECT last_value, is_called FROM _timescaledb_catalog.dimension_id_seq; + +--drop foreign keys on dimension table +ALTER TABLE _timescaledb_catalog.dimension_slice DROP CONSTRAINT +dimension_slice_dimension_id_fkey; + +--drop dependent views +DROP VIEW IF EXISTS timescaledb_information.chunks; +DROP VIEW IF EXISTS timescaledb_information.dimensions; +DROP VIEW IF EXISTS timescaledb_information.hypertable_compression_settings; + +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.dimension; +ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.dimension_id_seq; +DROP TABLE _timescaledb_catalog.dimension; + +CREATE TABLE _timescaledb_catalog.dimension ( + id serial NOT NULL , + hypertable_id integer NOT NULL, + column_name name NOT NULL, + column_type REGTYPE NOT NULL, + aligned boolean NOT NULL, + -- closed dimensions + num_slices smallint NULL, + partitioning_func_schema name NULL, + partitioning_func name NULL, + -- open dimensions (e.g., time) + interval_length bigint NULL, + -- compress interval is 
used by rollup procedure during compression + -- in order to merge multiple chunks into a single one + compress_interval_length bigint NULL, + integer_now_func_schema name NULL, + integer_now_func name NULL, + type "char", + -- table constraints + CONSTRAINT dimension_pkey PRIMARY KEY (id), + CONSTRAINT dimension_hypertable_id_column_name_key UNIQUE (hypertable_id, column_name), + CONSTRAINT dimension_check CHECK ((partitioning_func_schema IS NULL AND partitioning_func IS NULL) OR (partitioning_func_schema IS NOT NULL AND partitioning_func IS NOT NULL)), + CONSTRAINT dimension_check1 CHECK ((num_slices IS NULL AND interval_length IS NOT NULL) OR (num_slices IS NOT NULL AND interval_length IS NULL)), + CONSTRAINT dimension_check2 CHECK ((integer_now_func_schema IS NULL AND integer_now_func IS NULL) OR (integer_now_func_schema IS NOT NULL AND integer_now_func IS NOT NULL)), + CONSTRAINT dimension_interval_length_check CHECK (interval_length IS NULL OR interval_length > 0 OR type = 'C'), + CONSTRAINT dimension_compress_interval_length_check CHECK (compress_interval_length IS NULL OR compress_interval_length > 0), + CONSTRAINT dimension_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE +); + +INSERT INTO _timescaledb_catalog.dimension +( id, hypertable_id, column_name, column_type, + aligned, num_slices, partitioning_func_schema, + partitioning_func, interval_length, + compress_interval_length, + integer_now_func_schema, integer_now_func, + type) +SELECT id, hypertable_id, column_name, column_type, + aligned, num_slices, partitioning_func_schema, + partitioning_func, interval_length, + compress_interval_length, + integer_now_func_schema, integer_now_func, + CASE WHEN interval_length IS NULL AND num_slices IS NOT NULL THEN + 'c' -- closed dimension + WHEN interval_length IS NOT NULL AND num_slices is NULL THEN + 'o' -- open dimension + ELSE + 'a' -- any.. 
This should never happen + END as type +FROM _timescaledb_internal._tmp_dimension; + +-- Check that there's no entry with type == 'a' +DO $$ +DECLARE + count_var INTEGER; +BEGIN + SELECT count(*) FROM _timescaledb_catalog.dimension INTO count_var WHERE + type = 'a'; + IF count_var != 0 THEN + RAISE EXCEPTION 'invalid dimension entry found!'; + END IF; +END +$$; + +ALTER SEQUENCE _timescaledb_catalog.dimension_id_seq OWNED BY _timescaledb_catalog.dimension.id; +SELECT setval('_timescaledb_catalog.dimension_id_seq', last_value, is_called) FROM _timescaledb_internal.tmp_dimension_seq_value; + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.dimension', ''); +SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.dimension', 'id'), ''); + +--add the foreign key constraints +ALTER TABLE _timescaledb_catalog.dimension_slice ADD CONSTRAINT +dimension_slice_dimension_id_fkey FOREIGN KEY (dimension_id) +REFERENCES _timescaledb_catalog.dimension(id) ON DELETE CASCADE; + +--cleanup +DROP TABLE _timescaledb_internal._tmp_dimension; +DROP TABLE _timescaledb_internal.tmp_dimension_seq_value; + +GRANT SELECT ON _timescaledb_catalog.dimension_id_seq TO PUBLIC; +GRANT SELECT ON _timescaledb_catalog.dimension TO PUBLIC; + +-- end recreate _timescaledb_catalog.dimension table -- diff --git a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql index 0c201759d2f..1f510d955c4 100644 --- a/sql/updates/reverse-dev.sql +++ b/sql/updates/reverse-dev.sql @@ -294,3 +294,103 @@ SET check_name = 'policy_job_error_retention_check' WHERE id = 2; +DROP FUNCTION IF EXISTS @extschema@.add_dimension( + REGCLASS, + NAME, + INTEGER, + ANYELEMENT, + REGPROC, + BOOLEAN, + BOOLEAN +); + +CREATE FUNCTION @extschema@.add_dimension( + hypertable REGCLASS, + column_name NAME, + number_partitions INTEGER = NULL, + chunk_time_interval ANYELEMENT = NULL::BIGINT, + partitioning_func REGPROC = NULL, + if_not_exists BOOLEAN = FALSE +) RETURNS 
TABLE(dimension_id INT, schema_name NAME, table_name NAME, column_name NAME, created BOOL) +AS '@MODULE_PATHNAME@', 'ts_dimension_add' LANGUAGE C VOLATILE; + +-- Recreate _timescaledb_catalog.dimension table without the type column -- +CREATE TABLE _timescaledb_internal._tmp_dimension +AS SELECT * from _timescaledb_catalog.dimension; + +CREATE TABLE _timescaledb_internal.tmp_dimension_seq_value AS +SELECT last_value, is_called FROM _timescaledb_catalog.dimension_id_seq; + +--drop foreign keys on dimension table +ALTER TABLE _timescaledb_catalog.dimension_slice DROP CONSTRAINT +dimension_slice_dimension_id_fkey; + +--drop dependent views +DROP VIEW IF EXISTS timescaledb_information.chunks; +DROP VIEW IF EXISTS timescaledb_information.dimensions; +DROP VIEW IF EXISTS timescaledb_information.hypertable_compression_settings; + +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.dimension; +ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.dimension_id_seq; +DROP TABLE _timescaledb_catalog.dimension; + +CREATE TABLE _timescaledb_catalog.dimension ( + id serial NOT NULL , + hypertable_id integer NOT NULL, + column_name name NOT NULL, + column_type REGTYPE NOT NULL, + aligned boolean NOT NULL, + -- closed dimensions + num_slices smallint NULL, + partitioning_func_schema name NULL, + partitioning_func name NULL, + -- open dimensions (e.g., time) + interval_length bigint NULL, + -- compress interval is used by rollup procedure during compression + -- in order to merge multiple chunks into a single one + compress_interval_length bigint NULL, + integer_now_func_schema name NULL, + integer_now_func name NULL, + -- table constraints + CONSTRAINT dimension_pkey PRIMARY KEY (id), + CONSTRAINT dimension_hypertable_id_column_name_key UNIQUE (hypertable_id, column_name), + CONSTRAINT dimension_check CHECK ((partitioning_func_schema IS NULL AND partitioning_func IS NULL) OR (partitioning_func_schema IS NOT NULL AND partitioning_func IS NOT NULL)), + CONSTRAINT 
dimension_check1 CHECK ((num_slices IS NULL AND interval_length IS NOT NULL) OR (num_slices IS NOT NULL AND interval_length IS NULL)), + CONSTRAINT dimension_check2 CHECK ((integer_now_func_schema IS NULL AND integer_now_func IS NULL) OR (integer_now_func_schema IS NOT NULL AND integer_now_func IS NOT NULL)), + CONSTRAINT dimension_interval_length_check CHECK (interval_length IS NULL OR interval_length > 0), + CONSTRAINT dimension_compress_interval_length_check CHECK (compress_interval_length IS NULL OR compress_interval_length > 0), + CONSTRAINT dimension_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE +); + +INSERT INTO _timescaledb_catalog.dimension +( id, hypertable_id, column_name, column_type, + aligned, num_slices, partitioning_func_schema, + partitioning_func, interval_length, + compress_interval_length, + integer_now_func_schema, integer_now_func) +SELECT id, hypertable_id, column_name, column_type, + aligned, num_slices, partitioning_func_schema, + partitioning_func, interval_length, + compress_interval_length, + integer_now_func_schema, integer_now_func +FROM _timescaledb_internal._tmp_dimension; + +ALTER SEQUENCE _timescaledb_catalog.dimension_id_seq OWNED BY _timescaledb_catalog.dimension.id; +SELECT setval('_timescaledb_catalog.dimension_id_seq', last_value, is_called) FROM _timescaledb_internal.tmp_dimension_seq_value; + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.dimension', ''); +SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.dimension', 'id'), ''); + +--add the foreign key constraints +ALTER TABLE _timescaledb_catalog.dimension_slice ADD CONSTRAINT +dimension_slice_dimension_id_fkey FOREIGN KEY (dimension_id) +REFERENCES _timescaledb_catalog.dimension(id) ON DELETE CASCADE; + +--cleanup +DROP TABLE _timescaledb_internal._tmp_dimension; +DROP TABLE _timescaledb_internal.tmp_dimension_seq_value; + +GRANT SELECT ON 
_timescaledb_catalog.dimension_id_seq TO PUBLIC; +GRANT SELECT ON _timescaledb_catalog.dimension TO PUBLIC; + +-- end recreate _timescaledb_catalog.dimension table -- diff --git a/src/chunk.c b/src/chunk.c index 4219843a53c..a0b3ce69efb 100644 --- a/src/chunk.c +++ b/src/chunk.c @@ -841,6 +841,7 @@ chunk_create_object(const Hypertable *ht, Hypercube *cube, const char *schema_na const char *table_name, const char *prefix, int32 chunk_id) { const Hyperspace *hs = ht->space; + const Hyperspace *ss = ht->correlated_space; Chunk *chunk; const char relkind = RELKIND_RELATION; @@ -848,7 +849,8 @@ chunk_create_object(const Hypertable *ht, Hypercube *cube, const char *schema_na schema_name = NameStr(ht->fd.associated_schema_name); /* Create a new chunk based on the hypercube */ - chunk = ts_chunk_create_base(chunk_id, hs->num_dimensions, relkind); + chunk = + ts_chunk_create_base(chunk_id, hs->num_dimensions + (ss ? ss->num_dimensions : 0), relkind); chunk->fd.hypertable_id = hs->hypertable_id; chunk->cube = cube; @@ -1091,6 +1093,9 @@ chunk_create_from_hypercube_after_lock(const Hypertable *ht, Hypercube *cube, prefix, get_next_chunk_id()); + /* Insert any new correlated constraint slices into metadata */ + ts_correlated_constraints_dimension_slice_insert(ht, chunk); + chunk_add_constraints(chunk); chunk_insert_into_metadata_after_lock(chunk); chunk_create_table_constraints(ht, chunk); @@ -3426,8 +3431,17 @@ ts_chunk_set_unordered(Chunk *chunk) bool ts_chunk_set_partial(Chunk *chunk) { + Cache *hcache; + bool set_status; + Hypertable *htable = + ts_hypertable_cache_get_cache_and_entry(chunk->hypertable_relid, CACHE_FLAG_NONE, &hcache); Assert(ts_chunk_is_compressed(chunk)); - return ts_chunk_add_status(chunk, CHUNK_STATUS_COMPRESSED_PARTIAL); + set_status = ts_chunk_add_status(chunk, CHUNK_STATUS_COMPRESSED_PARTIAL); + /* reset any correlated constraints after the partial uncompressed data */ + if (htable->correlated_space) + 
ts_correlated_constraints_dimension_slice_calculate_update(htable, chunk, true); + ts_cache_release(hcache); + return set_status; } /* No inserts, updates, and deletes are permitted on a frozen chunk. @@ -4794,7 +4808,7 @@ ts_chunk_merge_on_dimension(const Hypertable *ht, Chunk *chunk, const Chunk *mer ts_dimension_slice_insert(new_slice); } - ts_chunk_constraint_update_slice_id(chunk->fd.id, slice->fd.id, new_slice->fd.id); + ts_chunk_constraint_update_slice_id_name(chunk->fd.id, slice->fd.id, new_slice->fd.id, NULL); ChunkConstraints *ccs = ts_chunk_constraints_alloc(1, CurrentMemoryContext); ScanIterator iterator = ts_scan_iterator_create(CHUNK_CONSTRAINT, AccessShareLock, CurrentMemoryContext); diff --git a/src/chunk_adaptive.c b/src/chunk_adaptive.c index 0b9c1d843f0..f0d827ff90b 100644 --- a/src/chunk_adaptive.c +++ b/src/chunk_adaptive.c @@ -188,10 +188,20 @@ minmax_indexscan(Relation rel, Relation idxrel, AttrNumber attnum, Datum minmax[ TupleTableSlot *slot = table_slot_create(rel, NULL); bool nulls[2] = { true, true }; int i; + ScanDirection directions[2] = { ForwardScanDirection /* min */, + BackwardScanDirection /* max */ }; + int16 option = idxrel->rd_indoption[0]; + bool index_orderby_asc = ((option & INDOPTION_DESC) == 0); + + /* default index ordering is ASC, check if that's not the case */ + if (!index_orderby_asc) + { + directions[0] = BackwardScanDirection; + directions[1] = ForwardScanDirection; + } for (i = 0; i < 2; i++) { - static ScanDirection directions[2] = { BackwardScanDirection, ForwardScanDirection }; bool found_tuple; bool isnull; @@ -266,8 +276,9 @@ table_has_minmax_index(Oid relid, Oid atttype, Name attname, AttrNumber attnum) * * Returns true iff min and max is found, otherwise false. 
*/ -static bool -chunk_get_minmax(Oid relid, Oid atttype, AttrNumber attnum, Datum minmax[2]) +bool +ts_chunk_get_minmax(Oid relid, Oid atttype, AttrNumber attnum, const char *call_context, + Datum minmax[2]) { Relation rel = table_open(relid, AccessShareLock); NameData attname; @@ -279,11 +290,11 @@ chunk_get_minmax(Oid relid, Oid atttype, AttrNumber attnum, Datum minmax[2]) if (res == MINMAX_NO_INDEX) { ereport(WARNING, - (errmsg("no index on \"%s\" found for adaptive chunking on chunk \"%s\"", + (errmsg("no index on \"%s\" found for %s on chunk \"%s\"", NameStr(attname), + call_context, get_rel_name(relid)), - errdetail("Adaptive chunking works best with an index on the dimension being " - "adapted."))); + errdetail("%s works best with an index on the dimension.", call_context))); res = minmax_heapscan(rel, atttype, attnum, minmax); } @@ -469,7 +480,11 @@ ts_calculate_chunk_interval(PG_FUNCTION_ARGS) slice_interval = slice->fd.range_end - slice->fd.range_start; - if (chunk_get_minmax(chunk->table_id, dim->fd.column_type, attno, minmax)) + if (ts_chunk_get_minmax(chunk->table_id, + dim->fd.column_type, + attno, + "adaptive chunking", + minmax)) { int64 min = ts_time_value_to_internal(minmax[0], dim->fd.column_type); int64 max = ts_time_value_to_internal(minmax[1], dim->fd.column_type); diff --git a/src/chunk_adaptive.h b/src/chunk_adaptive.h index 1b6cb113e47..0d89f8f3eab 100644 --- a/src/chunk_adaptive.h +++ b/src/chunk_adaptive.h @@ -24,6 +24,8 @@ typedef struct ChunkSizingInfo extern void ts_chunk_adaptive_sizing_info_validate(ChunkSizingInfo *info); extern void ts_chunk_sizing_func_validate(regproc func, ChunkSizingInfo *info); +extern bool ts_chunk_get_minmax(Oid relid, Oid atttype, AttrNumber attnum, const char *call_context, + Datum minmax[2]); extern TSDLLEXPORT ChunkSizingInfo *ts_chunk_sizing_info_get_default_disabled(Oid table_relid); extern TSDLLEXPORT int64 ts_chunk_calculate_initial_chunk_target_size(void); diff --git a/src/chunk_constraint.c 
b/src/chunk_constraint.c index f853804b5ff..851a7166c3b 100644 --- a/src/chunk_constraint.c +++ b/src/chunk_constraint.c @@ -88,9 +88,9 @@ chunk_constraints_expand(ChunkConstraints *ccs, int16 new_capacity) } static void -chunk_constraint_dimension_choose_name(Name dst, int32 dimension_slice_id) +chunk_constraint_dimension_choose_name(Name dst, const char *prefix, int32 dimension_slice_id) { - snprintf(NameStr(*dst), NAMEDATALEN, "constraint_%d", dimension_slice_id); + snprintf(NameStr(*dst), NAMEDATALEN, "%sconstraint_%d", prefix, dimension_slice_id); } static void @@ -115,7 +115,8 @@ chunk_constraint_choose_name(Name dst, const char *hypertable_constraint_name, i ChunkConstraint * ts_chunk_constraints_add(ChunkConstraints *ccs, int32 chunk_id, int32 dimension_slice_id, - const char *constraint_name, const char *hypertable_constraint_name) + const char *constraint_name, const char *hypertable_constraint_name, + char dimension_type) { ChunkConstraint *cc; @@ -123,12 +124,35 @@ ts_chunk_constraints_add(ChunkConstraints *ccs, int32 chunk_id, int32 dimension_ cc = &ccs->constraints[ccs->num_constraints++]; cc->fd.chunk_id = chunk_id; cc->fd.dimension_slice_id = dimension_slice_id; + switch (dimension_type) + { + case FD_DIMENSION_OPEN: + case FD_DIMENSION_CLOSED: + cc->type = CCONSTR_DIMENSION; + break; + case FD_DIMENSION_CHECK: + cc->type = CCONSTR_CHECK; + break; + case FD_DIMENSION_CORRELATED: + cc->type = CCONSTR_CORRELATED; + break; + default: + /* should not be possible */ + ereport(ERROR, (errmsg("unexpected dimension type %c", dimension_type))); + break; + } if (NULL == constraint_name) { - if (is_dimension_constraint(cc)) + if (is_dimension_constraint(cc) || is_correlated_constraint(cc)) { + /* + * for correlated constraints we choose a prefix of "_$CC_" to help + * us identify it on re-reading from the catalog. + */ chunk_constraint_dimension_choose_name(&cc->fd.constraint_name, + is_correlated_constraint(cc) ? 
CC_DIM_PREFIX : + "", cc->fd.dimension_slice_id); namestrcpy(&cc->fd.hypertable_constraint_name, ""); } @@ -163,7 +187,7 @@ chunk_constraint_fill_tuple_values(const ChunkConstraint *cc, Datum values[Natts values[AttrNumberGetAttrOffset(Anum_chunk_constraint_hypertable_constraint_name)] = NameGetDatum(&cc->fd.hypertable_constraint_name); - if (is_dimension_constraint(cc)) + if (is_dimension_constraint(cc) || is_correlated_constraint(cc)) nulls[AttrNumberGetAttrOffset(Anum_chunk_constraint_hypertable_constraint_name)] = true; else nulls[AttrNumberGetAttrOffset(Anum_chunk_constraint_dimension_slice_id)] = true; @@ -230,6 +254,7 @@ ts_chunk_constraints_add_from_tuple(ChunkConstraints *ccs, const TupleInfo *ti) Name constraint_name; Name hypertable_constraint_name; bool should_free; + char dimension_type = FD_DIMENSION_ANY; HeapTuple tuple = ts_scanner_fetch_heap_tuple(ti, false, &should_free); MemoryContext oldcxt; @@ -245,12 +270,22 @@ ts_chunk_constraints_add_from_tuple(ChunkConstraints *ccs, const TupleInfo *ti) dimension_slice_id = 0; hypertable_constraint_name = DatumGetName( values[AttrNumberGetAttrOffset(Anum_chunk_constraint_hypertable_constraint_name)]); + dimension_type = FD_DIMENSION_CLOSED; } else { dimension_slice_id = DatumGetInt32( values[AttrNumberGetAttrOffset(Anum_chunk_constraint_dimension_slice_id)]); hypertable_constraint_name = DatumGetName(DirectFunctionCall1(namein, CStringGetDatum(""))); + + /* check if it's a correlated constraint, we get the dimension's type for this */ + ScanIterator slice_iterator = + ts_dimension_slice_scan_iterator_create(NULL, CurrentMemoryContext); + DimensionSlice *slice = ts_dimension_slice_scan_iterator_get_by_id(&slice_iterator, + dimension_slice_id, + /* tuplock = */ NULL); + dimension_type = ts_dimension_get_dimension_type(slice->fd.dimension_id); + ts_scan_iterator_close(&slice_iterator); } constraints = ts_chunk_constraints_add(ccs, @@ -258,7 +293,8 @@ ts_chunk_constraints_add_from_tuple(ChunkConstraints *ccs, 
const TupleInfo *ti) Anum_chunk_constraint_chunk_id)]), dimension_slice_id, NameStr(*constraint_name), - NameStr(*hypertable_constraint_name)); + NameStr(*hypertable_constraint_name), + dimension_type); MemoryContextSwitchTo(oldcxt); @@ -492,7 +528,7 @@ ts_chunk_constraints_create(const Hypertable *ht, const Chunk *chunk) if (constr != NULL) newconstrs = lappend(newconstrs, constr); } - else + else if (!is_correlated_constraint(cc)) { create_non_dimensional_constraint(cc, chunk->table_id, @@ -781,7 +817,12 @@ ts_chunk_constraints_add_dimension_constraints(ChunkConstraints *ccs, int32 chun int i; for (i = 0; i < cube->num_slices; i++) - ts_chunk_constraints_add(ccs, chunk_id, cube->slices[i]->fd.id, NULL, NULL); + ts_chunk_constraints_add(ccs, + chunk_id, + cube->slices[i]->fd.id, + NULL, + NULL, + FD_DIMENSION_OPEN); return cube->num_slices; } @@ -802,7 +843,12 @@ chunk_constraint_add(HeapTuple constraint_tuple, void *arg) if (chunk_constraint_need_on_chunk(cc->chunk_relkind, constraint)) { - ts_chunk_constraints_add(cc->ccs, cc->chunk_id, 0, NULL, NameStr(constraint->conname)); + ts_chunk_constraints_add(cc->ccs, + cc->chunk_id, + 0, + NULL, + NameStr(constraint->conname), + FD_DIMENSION_CHECK); return CONSTR_PROCESSED; } @@ -835,7 +881,8 @@ chunk_constraint_add_check(HeapTuple constraint_tuple, void *arg) cc->chunk_id, 0, NameStr(constraint->conname), - NameStr(constraint->conname)); + NameStr(constraint->conname), + FD_DIMENSION_CHECK); return CONSTR_PROCESSED; } @@ -874,7 +921,8 @@ ts_chunk_constraint_create_on_chunk(const Hypertable *ht, const Chunk *chunk, Oi chunk->fd.id, 0, NULL, - NameStr(con->conname)); + NameStr(con->conname), + FD_DIMENSION_CHECK); ts_chunk_constraint_insert(cc); create_non_dimensional_constraint(cc, @@ -1180,7 +1228,8 @@ ts_chunk_constraint_adjust_meta(int32 chunk_id, const char *ht_constraint_name, } bool -ts_chunk_constraint_update_slice_id(int32 chunk_id, int32 old_slice_id, int32 new_slice_id) 
+ts_chunk_constraint_update_slice_id_name(int32 chunk_id, int32 old_slice_id, int32 new_slice_id, + const char *new_name) { ScanIterator iterator = ts_scan_iterator_create(CHUNK_CONSTRAINT, RowExclusiveLock, CurrentMemoryContext); @@ -1209,6 +1258,16 @@ ts_chunk_constraint_update_slice_id(int32 chunk_id, int32 old_slice_id, int32 ne Int32GetDatum(new_slice_id); repl[AttrNumberGetAttrOffset(Anum_chunk_constraint_dimension_slice_id)] = true; + if (new_name != NULL) + { + NameData new_namedata; + + namestrcpy(&new_namedata, new_name); + values[AttrNumberGetAttrOffset(Anum_chunk_constraint_constraint_name)] = + NameGetDatum(&new_namedata); + repl[AttrNumberGetAttrOffset(Anum_chunk_constraint_constraint_name)] = true; + } + new_tuple = heap_modify_tuple(tuple, ts_scanner_get_tupledesc(ti), values, replIsnull, repl); diff --git a/src/chunk_constraint.h b/src/chunk_constraint.h index 2ac7c890a23..4e173cae646 100644 --- a/src/chunk_constraint.h +++ b/src/chunk_constraint.h @@ -11,9 +11,20 @@ #include "ts_catalog/catalog.h" #include "hypertable.h" +typedef enum CConstrType /* types of chunk constraints */ +{ + CCONSTR_NONE, + CCONSTR_DIMENSION, + CCONSTR_CHECK, + CCONSTR_CORRELATED +} CConstrType; + +#define CC_DIM_PREFIX "_$CC_" /* Correlated Constraint dimension prefix */ + typedef struct ChunkConstraint { FormData_chunk_constraint fd; + CConstrType type; } ChunkConstraint; typedef struct ChunkConstraints @@ -27,7 +38,9 @@ typedef struct ChunkConstraints #define chunk_constraints_get(cc, i) &((cc)->constraints[i]) -#define is_dimension_constraint(cc) ((cc)->fd.dimension_slice_id > 0) +#define is_dimension_constraint(cc) \ + ((cc)->fd.dimension_slice_id > 0 && (cc)->type == CCONSTR_DIMENSION) +#define is_correlated_constraint(cc) ((cc)->type == CCONSTR_CORRELATED) typedef struct Chunk Chunk; typedef struct DimensionSlice DimensionSlice; @@ -48,7 +61,8 @@ extern int ts_chunk_constraint_scan_by_dimension_slice_id(int32 dimension_slice_ extern ChunkConstraint 
*ts_chunk_constraints_add(ChunkConstraints *ccs, int32 chunk_id, int32 dimension_slice_id, const char *constraint_name, - const char *hypertable_constraint_name); + const char *hypertable_constraint_name, + char dimension_type); extern int ts_chunk_constraints_add_dimension_constraints(ChunkConstraints *ccs, int32 chunk_id, const Hypercube *cube); extern TSDLLEXPORT int ts_chunk_constraints_add_inheritable_constraints(ChunkConstraints *ccs, @@ -75,8 +89,9 @@ extern int ts_chunk_constraint_rename_hypertable_constraint(int32 chunk_id, cons const char *new_name); extern int ts_chunk_constraint_adjust_meta(int32 chunk_id, const char *ht_constraint_name, const char *old_name, const char *new_name); -extern TSDLLEXPORT bool ts_chunk_constraint_update_slice_id(int32 chunk_id, int32 old_slice_id, - int32 new_slice_id); +extern TSDLLEXPORT bool ts_chunk_constraint_update_slice_id_name(int32 chunk_id, int32 old_slice_id, + int32 new_slice_id, + const char *new_name); extern char * ts_chunk_constraint_get_name_from_hypertable_constraint(Oid chunk_relid, diff --git a/src/dimension.c b/src/dimension.c index 136796ad63a..98a8791fad4 100644 --- a/src/dimension.c +++ b/src/dimension.c @@ -152,21 +152,47 @@ hyperspace_get_num_dimensions_by_type(Hyperspace *hs, DimensionType type) } static inline DimensionType -dimension_type(TupleInfo *ti) +get_dimension_type(char dimension_type) { - if (slot_attisnull(ti->slot, Anum_dimension_interval_length) && - !slot_attisnull(ti->slot, Anum_dimension_num_slices)) - return DIMENSION_TYPE_CLOSED; - - if (!slot_attisnull(ti->slot, Anum_dimension_interval_length) && - slot_attisnull(ti->slot, Anum_dimension_num_slices)) - return DIMENSION_TYPE_OPEN; + switch (dimension_type) + { + case FD_DIMENSION_OPEN: + return DIMENSION_TYPE_OPEN; + case FD_DIMENSION_CLOSED: + return DIMENSION_TYPE_CLOSED; + case FD_DIMENSION_CORRELATED: + return DIMENSION_TYPE_CORRELATED; + default: + /* should not be possible */ + ereport(ERROR, (errmsg("unexpected dimension 
type %c", dimension_type))); + break; + } - elog(ERROR, "invalid partitioning dimension"); /* suppress compiler warning on MSVC */ return DIMENSION_TYPE_ANY; } +static inline char +get_dimension_char_type(DimensionType dimension_type) +{ + switch (dimension_type) + { + case DIMENSION_TYPE_OPEN: + return FD_DIMENSION_OPEN; + case DIMENSION_TYPE_CLOSED: + return FD_DIMENSION_CLOSED; + case DIMENSION_TYPE_CORRELATED: + return FD_DIMENSION_CORRELATED; + default: + /* should not be possible */ + ereport(ERROR, (errmsg("unexpected dimension type %d", dimension_type))); + break; + } + + /* suppress compiler warning on MSVC */ + return FD_DIMENSION_ANY; +} + static void dimension_fill_in_from_tuple(Dimension *d, TupleInfo *ti, Oid main_table_relid) { @@ -181,7 +207,6 @@ dimension_fill_in_from_tuple(Dimension *d, TupleInfo *ti, Oid main_table_relid) */ heap_deform_tuple(tuple, ts_scanner_get_tupledesc(ti), values, isnull); - d->type = dimension_type(ti); d->fd.id = DatumGetInt32(values[AttrNumberGetAttrOffset(Anum_dimension_id)]); d->fd.hypertable_id = DatumGetInt32(values[AttrNumberGetAttrOffset(Anum_dimension_hypertable_id)]); @@ -190,6 +215,8 @@ dimension_fill_in_from_tuple(Dimension *d, TupleInfo *ti, Oid main_table_relid) DatumGetObjectId(values[AttrNumberGetAttrOffset(Anum_dimension_column_type)]); namestrcpy(&d->fd.column_name, DatumGetCString(values[AttrNumberGetAttrOffset(Anum_dimension_column_name)])); + d->fd.type = DatumGetChar(values[AttrNumberGetAttrOffset(Anum_dimension_type)]); + d->type = get_dimension_type(d->fd.type); if (!isnull[AttrNumberGetAttrOffset(Anum_dimension_partitioning_func_schema)] && !isnull[AttrNumberGetAttrOffset(Anum_dimension_partitioning_func)]) @@ -564,6 +591,49 @@ dimension_scan_internal(ScanKeyData *scankey, int nkeys, tuple_found_func tuple_ return ts_scanner_scan(&scanctx); } +/* + * Check if there are any correlated constraint entries and if yes create + * two new structures in the hypertable + */ +void 
+ts_correlated_constraints_assign(Hypertable *h, MemoryContext mctx) +{ + int i; + int num_corr_cons = 0; + Hyperspace *hs = h->space, *sec_hs = NULL, *new_hs = NULL; + + for (i = 0; i < hs->num_dimensions; i++) + { + Dimension *dim = &hs->dimensions[i]; + + if (IS_CORRELATED_DIMENSION(dim)) + num_corr_cons++; + } + + if (num_corr_cons == 0) + return; + + Assert(num_corr_cons < hs->num_dimensions); + sec_hs = hyperspace_create(hs->hypertable_id, hs->main_table_relid, num_corr_cons, mctx); + new_hs = hyperspace_create(hs->hypertable_id, + hs->main_table_relid, + hs->num_dimensions - num_corr_cons, + mctx); + + for (i = 0; i < hs->num_dimensions; i++) + { + Dimension *dim = &hs->dimensions[i]; + + if (IS_CORRELATED_DIMENSION(dim)) + sec_hs->dimensions[sec_hs->num_dimensions++] = *dim; + else + new_hs->dimensions[new_hs->num_dimensions++] = *dim; + } + pfree(h->space); + h->space = new_hs; + h->correlated_space = sec_hs; +} + Hyperspace * ts_dimension_scan(int32 hypertable_id, Oid main_table_relid, int16 num_dimensions, MemoryContext mctx) @@ -635,6 +705,49 @@ ts_dimension_get_hypertable_id(int32 dimension_id) return -1; } +static ScanTupleResult +dimension_find_dimension_type_tuple_found(TupleInfo *ti, void *data) +{ + char *dimension_type = data; + bool isnull = false; + Datum datum = slot_getattr(ti->slot, Anum_dimension_type, &isnull); + + Assert(!isnull); + *dimension_type = DatumGetChar(datum); + + return SCAN_DONE; +} + +char +ts_dimension_get_dimension_type(int32 dimension_id) +{ + char dimension_type = FD_DIMENSION_ANY; + ScanKeyData scankey[1]; + + /* Perform an index scan dimension_id. 
*/ + ScanKeyInit(&scankey[0], + Anum_dimension_id_idx_id, + BTEqualStrategyNumber, + F_INT4EQ, + Int32GetDatum(dimension_id)); + + dimension_scan_internal(scankey, + 1, + dimension_find_dimension_type_tuple_found, + &dimension_type, + 1, + DIMENSION_ID_IDX, + AccessShareLock, + CurrentMemoryContext); + + /* should never get a type of FD_DIMENSION_ANY */ + if (dimension_type == FD_DIMENSION_ANY) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TABLE_DEFINITION), + errmsg("got invalid type \"%c\" for dimension %d", dimension_type, dimension_id))); + return dimension_type; +} + DimensionVec * ts_dimension_get_slices(const Dimension *dim) { @@ -782,13 +895,15 @@ dimension_tuple_update(TupleInfo *ti, void *data) static int32 dimension_insert_relation(Relation rel, int32 hypertable_id, Name colname, Oid coltype, - int16 num_slices, regproc partitioning_func, int64 interval_length) + int16 num_slices, regproc partitioning_func, int64 interval_length, + DimensionType dim_type) { TupleDesc desc = RelationGetDescr(rel); Datum values[Natts_dimension]; bool nulls[Natts_dimension] = { false }; CatalogSecurityContext sec_ctx; int32 dimension_id; + char dimension_type = FD_DIMENSION_ANY; values[AttrNumberGetAttrOffset(Anum_dimension_hypertable_id)] = Int32GetDatum(hypertable_id); values[AttrNumberGetAttrOffset(Anum_dimension_column_name)] = NameGetDatum(colname); @@ -816,15 +931,19 @@ dimension_insert_relation(Relation rel, int32 hypertable_id, Name colname, Oid c values[AttrNumberGetAttrOffset(Anum_dimension_num_slices)] = Int16GetDatum(num_slices); values[AttrNumberGetAttrOffset(Anum_dimension_aligned)] = BoolGetDatum(false); nulls[AttrNumberGetAttrOffset(Anum_dimension_interval_length)] = true; + Assert(dim_type == DIMENSION_TYPE_CLOSED); + dimension_type = FD_DIMENSION_CLOSED; } else { /* Open (time) dimension */ - Assert(num_slices <= 0 && interval_length > 0); + Assert(dim_type == DIMENSION_TYPE_CORRELATED || (num_slices <= 0 && interval_length > 0)); 
values[AttrNumberGetAttrOffset(Anum_dimension_interval_length)] = Int64GetDatum(interval_length); values[AttrNumberGetAttrOffset(Anum_dimension_aligned)] = BoolGetDatum(true); nulls[AttrNumberGetAttrOffset(Anum_dimension_num_slices)] = true; + dimension_type = + (dim_type == DIMENSION_TYPE_CORRELATED) ? FD_DIMENSION_CORRELATED : FD_DIMENSION_OPEN; } /* no integer_now function by default */ @@ -834,6 +953,9 @@ dimension_insert_relation(Relation rel, int32 hypertable_id, Name colname, Oid c /* no compress interval length by default */ nulls[AttrNumberGetAttrOffset(Anum_dimension_compress_interval_length)] = true; + Assert(dimension_type != FD_DIMENSION_ANY); + values[AttrNumberGetAttrOffset(Anum_dimension_type)] = CharGetDatum(dimension_type); + ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx); dimension_id = Int32GetDatum(ts_catalog_table_next_seq_id(ts_catalog_get(), DIMENSION)); values[AttrNumberGetAttrOffset(Anum_dimension_id)] = dimension_id; @@ -845,7 +967,7 @@ dimension_insert_relation(Relation rel, int32 hypertable_id, Name colname, Oid c static int32 dimension_insert(int32 hypertable_id, Name colname, Oid coltype, int16 num_slices, - regproc partitioning_func, int64 interval_length) + regproc partitioning_func, int64 interval_length, DimensionType dim_type) { Catalog *catalog = ts_catalog_get(); Relation rel; @@ -858,13 +980,14 @@ dimension_insert(int32 hypertable_id, Name colname, Oid coltype, int16 num_slice coltype, num_slices, partitioning_func, - interval_length); + interval_length, + dim_type); table_close(rel, RowExclusiveLock); return dimension_id; } int -ts_dimension_set_type(Dimension *dim, Oid newtype) +ts_dimension_set_column_type(Dimension *dim, Oid newtype) { if (!IS_VALID_OPEN_DIM_TYPE(newtype)) ereport(ERROR, @@ -989,6 +1112,7 @@ ts_hyperspace_calculate_point(const Hyperspace *hs, TupleTableSlot *slot) case DIMENSION_TYPE_CLOSED: p->coordinates[p->num_coords++] = (int64) DatumGetInt32(datum); break; + case 
DIMENSION_TYPE_CORRELATED: case DIMENSION_TYPE_ANY: elog(ERROR, "invalid dimension type when inserting tuple"); break; @@ -1342,7 +1466,7 @@ dimension_info_validate_open(DimensionInfo *info) { Oid dimtype = info->coltype; - Assert(info->type == DIMENSION_TYPE_OPEN); + Assert(info->type == DIMENSION_TYPE_OPEN || info->type == DIMENSION_TYPE_CORRELATED); if (OidIsValid(info->partitioning_func)) { @@ -1469,6 +1593,7 @@ ts_dimension_info_validate(DimensionInfo *info) dimension_info_validate_closed(info); break; case DIMENSION_TYPE_OPEN: + case DIMENSION_TYPE_CORRELATED: /* needs same initial validation as open dims */ dimension_info_validate_open(info); break; case DIMENSION_TYPE_ANY: @@ -1490,7 +1615,8 @@ ts_dimension_add_from_info(DimensionInfo *info) info->coltype, info->num_slices, info->partitioning_func, - info->interval); + info->interval, + info->type); return info->dimension_id; } @@ -1553,6 +1679,7 @@ dimension_create_datum(FunctionCallInfo fcinfo, DimensionInfo *info, bool is_gen * 3. Interval for open ('time') dimensions * 4. Partitioning function * 5. IF NOT EXISTS option (bool) + * 6. Is it a correlated constraint? 
(bool) */ static Datum ts_dimension_add_internal(FunctionCallInfo fcinfo, DimensionInfo *info, bool is_generic) @@ -1585,6 +1712,18 @@ ts_dimension_add_internal(FunctionCallInfo fcinfo, DimensionInfo *info, bool is_ info->ht = ts_hypertable_cache_get_cache_and_entry(info->table_relid, CACHE_FLAG_NONE, &hcache); + /* for correlated constraints the type is INT8OID */ + if (info->type == DIMENSION_TYPE_CORRELATED) + { + if (OidIsValid(info->interval_type)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("interval type cannot be specified for correlated constraints"))); + + info->interval_type = INT8OID; + info->interval_datum = Int64GetDatum(1); + } + if (info->num_slices_is_set && OidIsValid(info->interval_type)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -1646,7 +1785,8 @@ ts_dimension_add_internal(FunctionCallInfo fcinfo, DimensionInfo *info, bool is_ chunk->fd.id, slice->fd.id, NULL, - NULL); + NULL, + get_dimension_char_type(info->type)); ts_chunk_constraint_insert(cc); } } @@ -1674,6 +1814,7 @@ ts_dimension_add(PG_FUNCTION_ARGS) .partitioning_func = PG_ARGISNULL(4) ? InvalidOid : PG_GETARG_OID(4), .if_not_exists = PG_ARGISNULL(5) ? false : PG_GETARG_BOOL(5), }; + bool correlated = PG_ARGISNULL(6) ? 
false : PG_GETARG_BOOL(6); TS_PREVENT_FUNC_IF_READ_ONLY(); @@ -1684,6 +1825,9 @@ ts_dimension_add(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("hypertable cannot be NULL"))); + if (correlated) + info.type = DIMENSION_TYPE_CORRELATED; + return ts_dimension_add_internal(fcinfo, &info, false); } @@ -1738,6 +1882,23 @@ ts_dimension_info_out(PG_FUNCTION_ARGS) break; } + case DIMENSION_TYPE_CORRELATED: /* this won't be called via this interface probably */ + { + const char *argvalstr = "-"; + + if (OidIsValid(info->interval_type)) + { + bool isvarlena; + Oid outfuncid; + getTypeOutputInfo(info->interval_type, &outfuncid, &isvarlena); + Assert(OidIsValid(outfuncid)); + argvalstr = OidOutputFunctionCall(outfuncid, info->interval_datum); + } + + appendStringInfo(&str, "correlated//%s//%s", NameStr(info->colname), argvalstr); + break; + } + case DIMENSION_TYPE_ANY: appendStringInfo(&str, "any"); break; diff --git a/src/dimension.h b/src/dimension.h index ef8cba354be..13d4a117807 100644 --- a/src/dimension.h +++ b/src/dimension.h @@ -19,10 +19,19 @@ typedef struct PartitioningInfo PartitioningInfo; typedef struct DimensionSlice DimensionSlice; typedef struct DimensionVec DimensionVec; +/* Valid values for FormData_dimension fd.type */ +#define FD_DIMENSION_OPEN 'o' +#define FD_DIMENSION_CLOSED 'c' +#define FD_DIMENSION_CORRELATED 'C' +#define FD_DIMENSION_CHECK 'h' /* these are not stored in dimension catalog table */ +#define FD_DIMENSION_ANY 'a' + +/* Corresponding enum values */ typedef enum DimensionType { DIMENSION_TYPE_OPEN, DIMENSION_TYPE_CLOSED, + DIMENSION_TYPE_CORRELATED, DIMENSION_TYPE_ANY, } DimensionType; @@ -37,6 +46,7 @@ typedef struct Dimension #define IS_OPEN_DIMENSION(d) ((d)->type == DIMENSION_TYPE_OPEN) #define IS_CLOSED_DIMENSION(d) ((d)->type == DIMENSION_TYPE_CLOSED) +#define IS_CORRELATED_DIMENSION(d) ((d)->type == DIMENSION_TYPE_CORRELATED) #define IS_VALID_OPEN_DIM_TYPE(type) \ (IS_INTEGER_TYPE(type) || 
IS_TIMESTAMP_TYPE(type) || ts_type_is_int8_binary_compatible(type)) @@ -117,10 +127,13 @@ typedef struct DimensionInfo } DimensionInfo; #define DIMENSION_INFO_IS_SET(di) (di != NULL && OidIsValid((di)->table_relid)) -#define DIMENSION_INFO_IS_VALID(di) (info->num_slices_is_set || OidIsValid(info->interval_type)) +#define DIMENSION_INFO_IS_VALID(di) \ + (info->num_slices_is_set || OidIsValid(info->interval_type) || \ + info->type == DIMENSION_TYPE_CORRELATED) extern Hyperspace *ts_dimension_scan(int32 hypertable_id, Oid main_table_relid, int16 num_dimension, MemoryContext mctx); +extern void ts_correlated_constraints_assign(Hypertable *h, MemoryContext mctx); extern DimensionSlice *ts_dimension_calculate_default_slice(const Dimension *dim, int64 value); extern TSDLLEXPORT Point *ts_hyperspace_calculate_point(const Hyperspace *h, TupleTableSlot *slot); extern int ts_dimension_get_slice_ordinal(const Dimension *dim, const DimensionSlice *slice); @@ -135,7 +148,8 @@ extern TSDLLEXPORT Dimension * ts_hyperspace_get_mutable_dimension_by_name(Hyperspace *hs, DimensionType type, const char *name); extern DimensionVec *ts_dimension_get_slices(const Dimension *dim); extern int32 ts_dimension_get_hypertable_id(int32 dimension_id); -extern int ts_dimension_set_type(Dimension *dim, Oid newtype); +extern char ts_dimension_get_dimension_type(int32 dimension_id); +extern int ts_dimension_set_column_type(Dimension *dim, Oid newtype); extern TSDLLEXPORT Oid ts_dimension_get_partition_type(const Dimension *dim); extern int ts_dimension_set_name(Dimension *dim, const char *newname); extern int ts_dimension_set_chunk_interval(Dimension *dim, int64 chunk_interval); diff --git a/src/dimension_slice.c b/src/dimension_slice.c index 826723bbdd1..080bae4b193 100644 --- a/src/dimension_slice.c +++ b/src/dimension_slice.c @@ -522,6 +522,48 @@ ts_dimension_slice_collision_scan_limit(int32 dimension_id, int64 range_start, i return ts_dimension_vec_sort(&slices); } +/* + * Scan for a slice that 
is exactly equal to the given range. + * + * Returns a dimension vector of the matching slice if any. + */ +DimensionVec * +ts_dimension_slice_equal_scan(int32 dimension_id, int64 range_start, int64 range_end) +{ + ScanKeyData scankey[3]; + DimensionVec *slice = ts_dimension_vec_create(1); /* maximum 1 dimension slice expected */ + + ScanKeyInit(&scankey[0], + Anum_dimension_slice_dimension_id_range_start_range_end_idx_dimension_id, + BTEqualStrategyNumber, + F_INT4EQ, + Int32GetDatum(dimension_id)); + + ScanKeyInit(&scankey[1], + Anum_dimension_slice_dimension_id_range_start_range_end_idx_range_start, + BTEqualStrategyNumber, + F_INT8EQ, + Int64GetDatum(range_start)); + + ScanKeyInit(&scankey[2], + Anum_dimension_slice_dimension_id_range_start_range_end_idx_range_end, + BTEqualStrategyNumber, + F_INT8EQ, + Int64GetDatum(range_end)); + + dimension_slice_scan_limit_internal(DIMENSION_SLICE_DIMENSION_ID_RANGE_START_RANGE_END_IDX, + scankey, + 3, + dimension_vec_tuple_found, + &slice, + 1, + AccessShareLock, + NULL, + CurrentMemoryContext); + + return slice; +} + DimensionVec * ts_dimension_slice_scan_by_dimension(int32 dimension_id, int limit) { @@ -983,6 +1025,220 @@ ts_dimension_slice_insert_multi(DimensionSlice **slices, Size num_slices) return n; } +/* + * Insert correlated constraint dimension slices into the catalog. + * + * Create entries with MINVALUE/MAXVALUE as ranges for correlated + * constraints and insert these entries. This allows for all the + * chunks to be picked up when queries use correlated columns in + * WHERE clauses. + * + * Also, add these to the chunk's constraint lists so that they + * eventually get added into the metadate in this same transaction + * + * We will asynchronously convert the entries into proper ranges + * later + * + * Returns the number of slices inserted. 
+ */ +int +ts_correlated_constraints_dimension_slice_insert(const Hypertable *ht, Chunk *chunk) +{ + Catalog *catalog; + Relation rel; + Size i = 0; + Hyperspace *hs = ht->correlated_space; + + if (hs == NULL) + return i; + + catalog = ts_catalog_get(); + rel = table_open(catalog_get_table_id(catalog, DIMENSION_SLICE), RowExclusiveLock); + + for (i = 0; i < hs->num_dimensions; i++) + { + Dimension *dim = &hs->dimensions[i]; + DimensionSlice *slice; + DimensionVec *vec; + + /* Check if an entry already exists first */ + vec = ts_dimension_slice_equal_scan(dim->fd.id, + DIMENSION_SLICE_MINVALUE, + DIMENSION_SLICE_MAXVALUE); + /* Add a new entry if none exists */ + if (vec->num_slices == 0) + { + slice = ts_dimension_slice_create(dim->fd.id, + DIMENSION_SLICE_MINVALUE, + DIMENSION_SLICE_MAXVALUE); + dimension_slice_insert_relation(rel, slice); + } + else + { + Assert(vec->num_slices == 1); + slice = vec->slices[0]; + } + + ts_chunk_constraints_add(chunk->constraints, + chunk->fd.id, + slice->fd.id, + NULL, + NULL, + FD_DIMENSION_CORRELATED); + } + + table_close(rel, RowExclusiveLock); + + return i; +} + +/* + * Update correlated constraints dimension slices in the catalog for the + * provided chunk (it's assumed that the chunk is locked + * appropriately). + * + * Calculate actual ranges for the given chunk for correlated + * constraints and update these entries. This allows for the + * chunk to be picked up when queries use correlated columns in + * WHERE clauses with these ranges. + * + * If "reset" is specified then we change the range back to MINVALUE + * and MAXVALUE + * + * Returns the number of slices updated. + */ +int +ts_correlated_constraints_dimension_slice_calculate_update(const Hypertable *ht, Chunk *chunk, + bool reset) +{ + Size i = 0; + Hyperspace *hs = ht->correlated_space; + ChunkConstraints *constraints = chunk->constraints; + MemoryContext work_mcxt, orig_mcxt; + + /* quick check for correlated constraints. 
Bail out early if none */ + if (hs == NULL) + return i; + + work_mcxt = + AllocSetContextCreate(CurrentMemoryContext, "dimension-slice-work", ALLOCSET_DEFAULT_SIZES); + orig_mcxt = MemoryContextSwitchTo(work_mcxt); + + for (int constraint_index = 0; constraint_index < constraints->num_constraints; + constraint_index++) + { + int slice_id; + DimensionSlice *slice; + ChunkConstraint *constraint = &constraints->constraints[constraint_index]; + Datum minmax[2]; + AttrNumber attno; + TupleInfo *ti; + ScanIterator slice_iterator; + const Dimension *dim; + + if (!is_correlated_constraint(constraint)) + continue; + + /* + * Find the slice by id. Don't have to lock it because the chunk is + * assumed to be locked when this function is called. + */ + slice_id = constraint->fd.dimension_slice_id; + slice_iterator = ts_dimension_slice_scan_iterator_create(NULL, orig_mcxt); + ts_dimension_slice_scan_iterator_set_slice_id(&slice_iterator, slice_id, NULL); + ts_scan_iterator_start_scan(&slice_iterator); + ti = ts_scan_iterator_next(&slice_iterator); + if (ti == NULL) + ereport(ERROR, errmsg("dimension slice %d not found!", slice_id)); + + slice = ts_dimension_slice_from_tuple(ti); + + /* get the dimension from the correlated subspace. It has to exist */ + dim = ts_hyperspace_get_dimension_by_id(ht->correlated_space, slice->fd.dimension_id); + if (dim == NULL) + ereport(ERROR, + errmsg("correlated constraint dimension %d not found!", + slice->fd.dimension_id)); + + attno = ts_map_attno(ht->main_table_relid, chunk->table_id, dim->column_attno); + + /* calculate the min/max for this correlated constraint attribute on this chunk */ + if (reset || ts_chunk_get_minmax(chunk->table_id, + dim->fd.column_type, + attno, + "correlated constraint", + minmax)) + { + DimensionSlice *new_slice; + DimensionVec *vec; + int64 min = reset ? DIMENSION_SLICE_MINVALUE : + ts_time_value_to_internal(minmax[0], dim->fd.column_type); + int64 max = reset ? 
DIMENSION_SLICE_MAXVALUE : + ts_time_value_to_internal(minmax[1], dim->fd.column_type); + + /* The end value is exclusive to the range, so incr by 1 */ + if (max != DIMENSION_SLICE_MAXVALUE) + { + max++; + /* Again, check overflow */ + max = REMAP_LAST_COORDINATE(max); + } + + /* + * It's possible that an existing chunk might have the SAME values for this + * correlated constraint due to which an entry might already exist in the + * dimension_slice catalog table. So check for that. + */ + + vec = ts_dimension_slice_equal_scan(dim->fd.id, min, max); + + /* Add a new entry if none exists */ + if (vec->num_slices == 0) + { + new_slice = ts_dimension_slice_create(dim->fd.id, min, max); + dimension_slice_insert(new_slice); + } + else + { + Assert(vec->num_slices == 1); + new_slice = vec->slices[0]; + } + + /* Generate a new name for this constraint using the new slice id */ + snprintf(NameStr(constraint->fd.constraint_name), + NAMEDATALEN, + "%sconstraint_%d", + CC_DIM_PREFIX, + new_slice->fd.id); + + /* Update the existing chunk constraint entry to point to this new slice */ + ts_chunk_constraint_update_slice_id_name(chunk->fd.id, + slice->fd.id, + new_slice->fd.id, + NameStr(constraint->fd.constraint_name)); + + /* + * Adjust the ChunkConstraint entry with this new slice id. 
It's already + * reflected in the catalog via the above call + */ + constraint->fd.dimension_slice_id = new_slice->fd.id; + + i++; + } + else + ereport(WARNING, + errmsg("unable to calculate min/max values for correlated constraints")); + + ts_scan_iterator_end(&slice_iterator); + ts_scan_iterator_close(&slice_iterator); + } + + MemoryContextSwitchTo(orig_mcxt); + MemoryContextDelete(work_mcxt); + + return i; +} + void ts_dimension_slice_insert(DimensionSlice *slice) { diff --git a/src/dimension_slice.h b/src/dimension_slice.h index 6c5fac6ce4b..b3f30705b28 100644 --- a/src/dimension_slice.h +++ b/src/dimension_slice.h @@ -53,6 +53,8 @@ ts_dimension_slice_scan_range_limit(int32 dimension_id, StrategyNumber start_str int limit, const ScanTupLock *tuplock); extern DimensionVec *ts_dimension_slice_collision_scan_limit(int32 dimension_id, int64 range_start, int64 range_end, int limit); +extern DimensionVec *ts_dimension_slice_equal_scan(int32 dimension_id, int64 range_start, + int64 range_end); extern bool ts_dimension_slice_scan_for_existing(const DimensionSlice *slice, const ScanTupLock *tuplock); extern DimensionSlice *ts_dimension_slice_scan_by_id_and_lock(int32 dimension_slice_id, @@ -77,6 +79,7 @@ extern bool ts_dimension_slice_cut(DimensionSlice *to_cut, const DimensionSlice extern void ts_dimension_slice_free(DimensionSlice *slice); extern int ts_dimension_slice_insert_multi(DimensionSlice **slice, Size num_slices); extern void ts_dimension_slice_insert(DimensionSlice *slice); +extern int ts_correlated_constraints_dimension_slice_insert(const Hypertable *ht, Chunk *chunk); extern int ts_dimension_slice_cmp(const DimensionSlice *left, const DimensionSlice *right); extern int ts_dimension_slice_cmp_coordinate(const DimensionSlice *slice, int64 coord); @@ -87,6 +90,9 @@ extern TSDLLEXPORT int32 ts_dimension_slice_oldest_valid_chunk_for_reorder( extern TSDLLEXPORT List *ts_dimension_slice_get_chunkids_to_compress( int32 dimension_id, StrategyNumber start_strategy, 
int64 start_value, StrategyNumber end_strategy, int64 end_value, bool compress, bool recompress, int32 numchunks); +extern TSDLLEXPORT int +ts_correlated_constraints_dimension_slice_calculate_update(const Hypertable *ht, Chunk *chunk, + bool reset); extern DimensionSlice *ts_dimension_slice_from_tuple(TupleInfo *ti); extern ScanIterator ts_dimension_slice_scan_iterator_create(const ScanTupLock *tuplock, diff --git a/src/hypertable.c b/src/hypertable.c index 1199ae96d63..ed88d47e5d5 100644 --- a/src/hypertable.c +++ b/src/hypertable.c @@ -242,6 +242,8 @@ ts_hypertable_from_tupleinfo(const TupleInfo *ti) h->main_table_relid = ts_get_relation_relid(NameStr(h->fd.schema_name), NameStr(h->fd.table_name), true); h->space = ts_dimension_scan(h->fd.id, h->main_table_relid, h->fd.num_dimensions, ti->mctx); + /* check and assign correlated constraint */ + ts_correlated_constraints_assign(h, ti->mctx); h->chunk_cache = ts_subspace_store_init(h->space, ti->mctx, ts_guc_max_cached_chunks_per_hypertable); h->chunk_sizing_func = get_chunk_sizing_func_oid(&h->fd); diff --git a/src/hypertable.h b/src/hypertable.h index 7e0ea5cf5ae..30f6ac08f58 100644 --- a/src/hypertable.h +++ b/src/hypertable.h @@ -51,6 +51,7 @@ typedef struct Hypertable Oid main_table_relid; Oid chunk_sizing_func; Hyperspace *space; + Hyperspace *correlated_space; SubspaceStore *chunk_cache; /* * Allows restricting the data nodes to use for the hypertable. 
Default is to diff --git a/src/hypertable_restrict_info.c b/src/hypertable_restrict_info.c index b00c38f20fd..f69a6bb4cec 100644 --- a/src/hypertable_restrict_info.c +++ b/src/hypertable_restrict_info.c @@ -85,6 +85,7 @@ dimension_restrict_info_create(const Dimension *d) switch (d->type) { case DIMENSION_TYPE_OPEN: + case DIMENSION_TYPE_CORRELATED: return &dimension_restrict_info_open_create(d)->base; case DIMENSION_TYPE_CLOSED: return &dimension_restrict_info_closed_create(d)->base; @@ -104,6 +105,7 @@ dimension_restrict_info_is_trivial(const DimensionRestrictInfo *dri) switch (dri->dimension->type) { case DIMENSION_TYPE_OPEN: + case DIMENSION_TYPE_CORRELATED: { DimensionRestrictInfoOpen *open = (DimensionRestrictInfoOpen *) dri; return open->lower_strategy == InvalidStrategy && @@ -247,6 +249,7 @@ dimension_restrict_info_add(DimensionRestrictInfo *dri, int strategy, Oid collat switch (dri->dimension->type) { case DIMENSION_TYPE_OPEN: + case DIMENSION_TYPE_CORRELATED: return dimension_restrict_info_open_add((DimensionRestrictInfoOpen *) dri, strategy, collation, @@ -275,18 +278,28 @@ typedef struct HypertableRestrictInfo HypertableRestrictInfo * ts_hypertable_restrict_info_create(RelOptInfo *rel, Hypertable *ht) { - int num_dimensions = ht->space->num_dimensions; + int num_dimensions = ht->space->num_dimensions + + (ht->correlated_space ? 
ht->correlated_space->num_dimensions : 0); HypertableRestrictInfo *res = palloc0(sizeof(HypertableRestrictInfo) + sizeof(DimensionRestrictInfo *) * num_dimensions); int i; + int sec_index = 0; res->num_dimensions = num_dimensions; - for (i = 0; i < num_dimensions; i++) + for (i = 0; i < ht->space->num_dimensions; i++) { DimensionRestrictInfo *dri = dimension_restrict_info_create(&ht->space->dimensions[i]); res->dimension_restriction[i] = dri; + sec_index++; + } + for (i = 0; ht->correlated_space != NULL && i < ht->correlated_space->num_dimensions; i++) + { + DimensionRestrictInfo *dri = + dimension_restrict_info_create(&ht->correlated_space->dimensions[i]); + + res->dimension_restriction[sec_index++] = dri; } return res; @@ -540,6 +553,7 @@ gather_restriction_dimension_vectors(const HypertableRestrictInfo *hri) switch (dri->dimension->type) { case DIMENSION_TYPE_OPEN: + case DIMENSION_TYPE_CORRELATED: { const DimensionRestrictInfoOpen *open = (const DimensionRestrictInfoOpen *) dri; diff --git a/src/process_utility.c b/src/process_utility.c index bd7140df473..11c18af91d0 100644 --- a/src/process_utility.c +++ b/src/process_utility.c @@ -3093,7 +3093,7 @@ process_alter_column_type_end(Hypertable *ht, AlterTableCmd *cmd) if (NULL == dim) return; - ts_dimension_set_type(dim, new_type); + ts_dimension_set_column_type(dim, new_type); ts_process_utility_set_expect_chunk_modification(true); ts_chunk_recreate_all_constraints_for_dimension(ht, dim->fd.id); ts_process_utility_set_expect_chunk_modification(false); diff --git a/src/ts_catalog/catalog.h b/src/ts_catalog/catalog.h index b51d371ef8f..f179bea3e21 100644 --- a/src/ts_catalog/catalog.h +++ b/src/ts_catalog/catalog.h @@ -186,6 +186,7 @@ enum Anum_dimension Anum_dimension_compress_interval_length, Anum_dimension_integer_now_func_schema, Anum_dimension_integer_now_func, + Anum_dimension_type, _Anum_dimension_max, }; @@ -207,6 +208,7 @@ typedef struct FormData_dimension int64 compress_interval_length; NameData 
integer_now_func_schema; NameData integer_now_func; + char type; } FormData_dimension; typedef FormData_dimension *Form_dimension; diff --git a/test/expected/chunk_adaptive.out b/test/expected/chunk_adaptive.out index 2349120cef3..002b2dbceda 100644 --- a/test/expected/chunk_adaptive.out +++ b/test/expected/chunk_adaptive.out @@ -89,9 +89,9 @@ FROM _timescaledb_catalog.hypertable; -- Check that adaptive chunking sets a 1 day default chunk time -- interval => 86400000000 microseconds SELECT * FROM _timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+--------------------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------ - 2 | 2 | time | timestamp with time zone | t | | | | 86400000000 | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+--------------------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------+------ + 2 | 2 | time | timestamp with time zone | t | | | | 86400000000 | | | | o (1 row) -- Change the target size diff --git a/test/expected/create_chunks.out b/test/expected/create_chunks.out index 1b21bd9497c..5a4e3671008 100644 --- a/test/expected/create_chunks.out +++ b/test/expected/create_chunks.out @@ -82,10 +82,10 @@ SELECT set_chunk_time_interval('chunk_test', 5::bigint); (1 row) SELECT * FROM _timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | 
partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ - 2 | 1 | tag | integer | f | 3 | _timescaledb_functions | get_partition_hash | | | | - 1 | 1 | time | integer | t | | | | 5 | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------+------ + 2 | 1 | tag | integer | f | 3 | _timescaledb_functions | get_partition_hash | | | | | c + 1 | 1 | time | integer | t | | | | 5 | | | | o (2 rows) INSERT INTO chunk_test VALUES (7, 24.3, 79669, 1); diff --git a/test/expected/create_hypertable.out b/test/expected/create_hypertable.out index d79eeb6a1a7..1bd08b828c0 100644 --- a/test/expected/create_hypertable.out +++ b/test/expected/create_hypertable.out @@ -123,13 +123,13 @@ select * from _timescaledb_catalog.hypertable where table_name = 'test_table'; (1 row) select * from _timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ - 1 | 1 | time | bigint | t | | | | 2592000000000 | | | - 2 | 1 | device_id | text | f | 2 | 
_timescaledb_functions | get_partition_hash | | | | - 3 | 2 | time | bigint | t | | | | 2592000000000 | | | - 4 | 2 | device_id | text | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 5 | 2 | location | text | f | 4 | _timescaledb_functions | get_partition_hash | | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------+------ + 1 | 1 | time | bigint | t | | | | 2592000000000 | | | | o + 2 | 1 | device_id | text | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 3 | 2 | time | bigint | t | | | | 2592000000000 | | | | o + 4 | 2 | device_id | text | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 5 | 2 | location | text | f | 4 | _timescaledb_functions | get_partition_hash | | | | | c (5 rows) --test that we can change the number of partitions and that 1 is allowed @@ -140,9 +140,9 @@ SELECT set_number_partitions('test_schema.test_table', 1, 'location'); (1 row) select * from _timescaledb_catalog.dimension WHERE column_name = 'location'; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ - 5 | 2 | location | text | f | 1 | _timescaledb_functions | get_partition_hash | | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | 
partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------+------ + 5 | 2 | location | text | f | 1 | _timescaledb_functions | get_partition_hash | | | | | c (1 row) SELECT set_number_partitions('test_schema.test_table', 2, 'location'); @@ -152,9 +152,9 @@ SELECT set_number_partitions('test_schema.test_table', 2, 'location'); (1 row) select * from _timescaledb_catalog.dimension WHERE column_name = 'location'; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ - 5 | 2 | location | text | f | 2 | _timescaledb_functions | get_partition_hash | | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------+------ + 5 | 2 | location | text | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c (1 row) \set ON_ERROR_STOP 0 @@ -186,14 +186,14 @@ select * from _timescaledb_catalog.hypertable where table_name = 'test_table'; (1 row) select * from _timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | 
partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ - 1 | 1 | time | bigint | t | | | | 2592000000000 | | | - 2 | 1 | device_id | text | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 3 | 2 | time | bigint | t | | | | 2592000000000 | | | - 4 | 2 | device_id | text | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 5 | 2 | location | text | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 6 | 2 | id | integer | t | | | | 1000 | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------+------ + 1 | 1 | time | bigint | t | | | | 2592000000000 | | | | o + 2 | 1 | device_id | text | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 3 | 2 | time | bigint | t | | | | 2592000000000 | | | | o + 4 | 2 | device_id | text | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 5 | 2 | location | text | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 6 | 2 | id | integer | t | | | | 1000 | | | | o (6 rows) -- Test add_dimension: can use interval types for TIMESTAMPTZ columns @@ -788,9 +788,9 @@ select set_integer_now_func('test_table_int', 'dummy_now'); (1 row) select * from _timescaledb_catalog.dimension WHERE hypertable_id = :TEST_TABLE_INT_HYPERTABLE_ID; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | 
partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------ - 29 | 17 | time | bigint | t | | | | 1 | | public | dummy_now + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------+------ + 29 | 17 | time | bigint | t | | | | 1 | | public | dummy_now | o (1 row) -- show chunks works with "created_before" and errors out with time used in "older_than" @@ -826,9 +826,9 @@ select set_integer_now_func('test_table_int', 'my_user_schema.dummy_now4', repla \c :TEST_DBNAME :ROLE_SUPERUSER ALTER SCHEMA my_user_schema RENAME TO my_new_schema; select * from _timescaledb_catalog.dimension WHERE hypertable_id = :TEST_TABLE_INT_HYPERTABLE_ID; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------ - 29 | 17 | time | bigint | t | | | | 1 | | my_new_schema | dummy_now4 + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type 
+----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------+------ + 29 | 17 | time | bigint | t | | | | 1 | | my_new_schema | dummy_now4 | o (1 row) -- github issue #4650 diff --git a/test/expected/ddl-13.out b/test/expected/ddl-13.out index d65b7532929..b53dbb7dbfa 100644 --- a/test/expected/ddl-13.out +++ b/test/expected/ddl-13.out @@ -422,10 +422,10 @@ SELECT * FROM test.show_columnsp('_timescaledb_internal._hyper_9_%chunk'); -- show the column name and type of the partitioning dimension in the -- metadata table SELECT * FROM _timescaledb_catalog.dimension WHERE hypertable_id = 9; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+--------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ - 15 | 9 | color | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 14 | 9 | time | timestamp with time zone | t | | | | 2628000000000 | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+--------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------+------ + 15 | 9 | color | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 14 | 9 | time | timestamp with time zone | t | | | | 2628000000000 | | | | o (2 rows) EXPLAIN 
(costs off) @@ -476,10 +476,10 @@ SELECT * FROM test.show_columnsp('_timescaledb_internal._hyper_9_%chunk'); -- show that the metadata has been updated SELECT * FROM _timescaledb_catalog.dimension WHERE hypertable_id = 9; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-----------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ - 15 | 9 | colorname | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 14 | 9 | time_us | timestamp without time zone | t | | | | 2628000000000 | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-----------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------+------ + 15 | 9 | colorname | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 14 | 9 | time_us | timestamp without time zone | t | | | | 2628000000000 | | | | o (2 rows) -- constraint exclusion should still work with updated column diff --git a/test/expected/ddl-14.out b/test/expected/ddl-14.out index ab3abf144d7..3bd8deb39de 100644 --- a/test/expected/ddl-14.out +++ b/test/expected/ddl-14.out @@ -422,10 +422,10 @@ SELECT * FROM test.show_columnsp('_timescaledb_internal._hyper_9_%chunk'); -- show the column name and type of the partitioning dimension in the -- metadata table SELECT * FROM _timescaledb_catalog.dimension WHERE hypertable_id = 9; - id | hypertable_id | 
column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+--------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ - 15 | 9 | color | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 14 | 9 | time | timestamp with time zone | t | | | | 2628000000000 | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+--------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------+------ + 15 | 9 | color | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 14 | 9 | time | timestamp with time zone | t | | | | 2628000000000 | | | | o (2 rows) EXPLAIN (costs off) @@ -476,10 +476,10 @@ SELECT * FROM test.show_columnsp('_timescaledb_internal._hyper_9_%chunk'); -- show that the metadata has been updated SELECT * FROM _timescaledb_catalog.dimension WHERE hypertable_id = 9; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-----------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ - 15 | 9 | colorname | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 14 | 9 
| time_us | timestamp without time zone | t | | | | 2628000000000 | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-----------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------+------ + 15 | 9 | colorname | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 14 | 9 | time_us | timestamp without time zone | t | | | | 2628000000000 | | | | o (2 rows) -- constraint exclusion should still work with updated column diff --git a/test/expected/ddl-15.out b/test/expected/ddl-15.out index ab3abf144d7..3bd8deb39de 100644 --- a/test/expected/ddl-15.out +++ b/test/expected/ddl-15.out @@ -422,10 +422,10 @@ SELECT * FROM test.show_columnsp('_timescaledb_internal._hyper_9_%chunk'); -- show the column name and type of the partitioning dimension in the -- metadata table SELECT * FROM _timescaledb_catalog.dimension WHERE hypertable_id = 9; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+--------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ - 15 | 9 | color | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 14 | 9 | time | timestamp with time zone | t | | | | 2628000000000 | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | 
integer_now_func_schema | integer_now_func | type +----+---------------+-------------+--------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------+------ + 15 | 9 | color | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 14 | 9 | time | timestamp with time zone | t | | | | 2628000000000 | | | | o (2 rows) EXPLAIN (costs off) @@ -476,10 +476,10 @@ SELECT * FROM test.show_columnsp('_timescaledb_internal._hyper_9_%chunk'); -- show that the metadata has been updated SELECT * FROM _timescaledb_catalog.dimension WHERE hypertable_id = 9; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-----------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ - 15 | 9 | colorname | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 14 | 9 | time_us | timestamp without time zone | t | | | | 2628000000000 | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-----------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------+------ + 15 | 9 | colorname | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 14 | 9 | time_us | timestamp without time zone | t | | | | 2628000000000 | | | | o (2 rows) -- constraint exclusion 
should still work with updated column diff --git a/test/expected/ddl-16.out b/test/expected/ddl-16.out index ab3abf144d7..3bd8deb39de 100644 --- a/test/expected/ddl-16.out +++ b/test/expected/ddl-16.out @@ -422,10 +422,10 @@ SELECT * FROM test.show_columnsp('_timescaledb_internal._hyper_9_%chunk'); -- show the column name and type of the partitioning dimension in the -- metadata table SELECT * FROM _timescaledb_catalog.dimension WHERE hypertable_id = 9; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+--------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ - 15 | 9 | color | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 14 | 9 | time | timestamp with time zone | t | | | | 2628000000000 | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+--------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------+------ + 15 | 9 | color | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 14 | 9 | time | timestamp with time zone | t | | | | 2628000000000 | | | | o (2 rows) EXPLAIN (costs off) @@ -476,10 +476,10 @@ SELECT * FROM test.show_columnsp('_timescaledb_internal._hyper_9_%chunk'); -- show that the metadata has been updated SELECT * FROM _timescaledb_catalog.dimension WHERE hypertable_id = 9; - id | hypertable_id | column_name | column_type | aligned | 
num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-----------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ - 15 | 9 | colorname | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 14 | 9 | time_us | timestamp without time zone | t | | | | 2628000000000 | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-----------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------+------ + 15 | 9 | colorname | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 14 | 9 | time_us | timestamp without time zone | t | | | | 2628000000000 | | | | o (2 rows) -- constraint exclusion should still work with updated column diff --git a/test/expected/drop_hypertable.out b/test/expected/drop_hypertable.out index 01e80dbeb91..6da4671a082 100644 --- a/test/expected/drop_hypertable.out +++ b/test/expected/drop_hypertable.out @@ -7,8 +7,8 @@ SELECT * from _timescaledb_catalog.hypertable; (0 rows) SELECT * from _timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func 
-----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------ + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------+------ (0 rows) CREATE TABLE should_drop (time timestamp, temp float8); @@ -82,9 +82,9 @@ SELECT * from _timescaledb_catalog.hypertable; (1 row) SELECT * from _timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-----------------------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------ - 1 | 1 | time | timestamp without time zone | t | | | | 604800000000 | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-----------------------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------+------ + 1 | 1 | time | timestamp without time zone | t | | | | 604800000000 | | | | o (1 row) DROP TABLE should_drop; @@ -105,8 +105,8 @@ SELECT * from _timescaledb_catalog.hypertable; (1 row) SELECT * from 
_timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-----------------------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------ - 4 | 4 | time | timestamp without time zone | t | | | | 604800000000 | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-----------------------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------+------ + 4 | 4 | time | timestamp without time zone | t | | | | 604800000000 | | | | o (1 row) diff --git a/test/expected/drop_owned.out b/test/expected/drop_owned.out index 204e4aa4daf..a67d8cdd87d 100644 --- a/test/expected/drop_owned.out +++ b/test/expected/drop_owned.out @@ -64,8 +64,8 @@ SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, (0 rows) SELECT * FROM _timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------ + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | 
integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------+------ (0 rows) SELECT * FROM _timescaledb_catalog.dimension_slice; diff --git a/test/expected/drop_schema.out b/test/expected/drop_schema.out index 13afab2eda6..acfc34c53e7 100644 --- a/test/expected/drop_schema.out +++ b/test/expected/drop_schema.out @@ -96,8 +96,8 @@ SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, (0 rows) SELECT * FROM _timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------ + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------+------ (0 rows) SELECT * FROM _timescaledb_catalog.dimension_slice; diff --git a/test/expected/partition.out b/test/expected/partition.out index d660bfa4084..27500289f09 100644 --- a/test/expected/partition.out +++ b/test/expected/partition.out @@ -11,10 +11,10 @@ NOTICE: adding not-null constraint to column "time" -- Show legacy partitioning function is used SELECT * FROM _timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | 
partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+--------------------------+---------+------------+--------------------------+-----------------------+-----------------+--------------------------+-------------------------+------------------ - 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | - 2 | 1 | device | integer | f | 2 | _timescaledb_functions | get_partition_for_key | | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+--------------------------+---------+------------+--------------------------+-----------------------+-----------------+--------------------------+-------------------------+------------------+------ + 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | | o + 2 | 1 | device | integer | f | 2 | _timescaledb_functions | get_partition_for_key | | | | | c (2 rows) INSERT INTO part_legacy VALUES ('2017-03-22T09:18:23', 23.4, 1); @@ -54,12 +54,12 @@ NOTICE: adding not-null constraint to column "time" (1 row) SELECT * FROM _timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+--------------------------+---------+------------+--------------------------+-----------------------+-----------------+--------------------------+-------------------------+------------------ - 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | - 2 | 1 | device | integer | f | 2 | _timescaledb_functions | get_partition_for_key | | | | - 3 | 2 | time | timestamp with time zone | t | 
| | | 604800000000 | | | - 4 | 2 | device | integer | f | 2 | _timescaledb_functions | get_partition_hash | | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+--------------------------+---------+------------+--------------------------+-----------------------+-----------------+--------------------------+-------------------------+------------------+------ + 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | | o + 2 | 1 | device | integer | f | 2 | _timescaledb_functions | get_partition_for_key | | | | | c + 3 | 2 | time | timestamp with time zone | t | | | | 604800000000 | | | | o + 4 | 2 | device | integer | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c (4 rows) INSERT INTO part_new VALUES ('2017-03-22T09:18:23', 23.4, 1); @@ -133,17 +133,17 @@ SELECT add_dimension('part_add_dim', 'location', 2, partitioning_func => '_times (1 row) SELECT * FROM _timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-----------------------------+---------+------------+--------------------------+-----------------------+-----------------+--------------------------+-------------------------+------------------ - 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | - 2 | 1 | device | integer | f | 2 | _timescaledb_functions | get_partition_for_key | | | | - 3 | 2 | time | timestamp with time zone | t | | | | 604800000000 | | | - 4 | 2 | device | integer | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 6 | 3 | temp | double precision | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 
5 | 3 | time | timestamp without time zone | t | | | | 604800000000 | | | - 7 | 4 | time | timestamp with time zone | t | | | | 604800000000 | | | - 8 | 4 | temp | double precision | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 9 | 4 | location | integer | f | 2 | _timescaledb_functions | get_partition_for_key | | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-----------------------------+---------+------------+--------------------------+-----------------------+-----------------+--------------------------+-------------------------+------------------+------ + 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | | o + 2 | 1 | device | integer | f | 2 | _timescaledb_functions | get_partition_for_key | | | | | c + 3 | 2 | time | timestamp with time zone | t | | | | 604800000000 | | | | o + 4 | 2 | device | integer | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 6 | 3 | temp | double precision | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 5 | 3 | time | timestamp without time zone | t | | | | 604800000000 | | | | o + 7 | 4 | time | timestamp with time zone | t | | | | 604800000000 | | | | o + 8 | 4 | temp | double precision | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 9 | 4 | location | integer | f | 2 | _timescaledb_functions | get_partition_for_key | | | | | c (9 rows) -- Test that we support custom SQL-based partitioning functions and diff --git a/tsl/src/compression/api.c b/tsl/src/compression/api.c index 1255301d106..23fb6652fb6 100644 --- a/tsl/src/compression/api.c +++ b/tsl/src/compression/api.c @@ -455,6 +455,15 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid) int insert_options = new_compressed_chunk ? 
HEAP_INSERT_FROZEN : 0; before_size = ts_relation_size_impl(cxt.srcht_chunk->table_id); + /* + * Calculate and add the correlated constraint dimension ranges for the src chunk. This has to + * be done before the compression + */ + if (cxt.srcht->correlated_space) + ts_correlated_constraints_dimension_slice_calculate_update(cxt.srcht, + cxt.srcht_chunk, + false); + cstat = compress_chunk(cxt.srcht_chunk->table_id, compress_ht_chunk->table_id, insert_options); /* Drop all FK constraints on the uncompressed chunk. This is needed to allow @@ -462,6 +471,7 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid) * directly on the hypertable or chunks. */ ts_chunk_drop_fks(cxt.srcht_chunk); + after_size = ts_relation_size_impl(compress_ht_chunk->table_id); if (new_compressed_chunk) @@ -596,6 +606,12 @@ decompress_chunk_impl(Chunk *uncompressed_chunk, bool if_compressed) decompress_chunk(compressed_chunk->table_id, uncompressed_chunk->table_id); + /* reset any correlated constraints after the decompression */ + if (uncompressed_hypertable->correlated_space) + ts_correlated_constraints_dimension_slice_calculate_update(uncompressed_hypertable, + uncompressed_chunk, + true); + /* Recreate FK constraints, since they were dropped during compression. 
*/ ts_chunk_create_fks(uncompressed_hypertable, uncompressed_chunk); diff --git a/tsl/src/compression/create.c b/tsl/src/compression/create.c index ae0c1367382..9ff72d06b5d 100644 --- a/tsl/src/compression/create.c +++ b/tsl/src/compression/create.c @@ -44,6 +44,7 @@ #include "compression_with_clause.h" #include "compression.h" #include "compression/compression_storage.h" +#include "dimension_slice.h" #include "hypertable_cache.h" #include "custom_type_cache.h" #include "trigger.h" diff --git a/tsl/test/expected/bgw_policy.out b/tsl/test/expected/bgw_policy.out index d51188df0dd..094683af99b 100644 --- a/tsl/test/expected/bgw_policy.out +++ b/tsl/test/expected/bgw_policy.out @@ -588,14 +588,14 @@ select remove_retention_policy('part_time_now_func'); alter function dummy_now() rename to dummy_now_renamed; alter schema public rename to new_public; select * from _timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+-----------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ - 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | - 2 | 1 | chunk_id | integer | f | 2 | _timescaledb_functions | get_partition_hash | | | | - 5 | 4 | time | timestamp without time zone | t | | | | 1 | | | - 6 | 5 | time | double precision | t | | new_public | time_partfunc | 1 | | new_public | dummy_now - 3 | 2 | time | bigint | t | | | | 1 | | new_public | nowstamp - 4 | 3 | time | smallint | t | | | | 1 | | new_public | overflow_now + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | 
integer_now_func | type +----+---------------+-------------+-----------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------+------ + 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | | o + 2 | 1 | chunk_id | integer | f | 2 | _timescaledb_functions | get_partition_hash | | | | | c + 5 | 4 | time | timestamp without time zone | t | | | | 1 | | | | o + 6 | 5 | time | double precision | t | | new_public | time_partfunc | 1 | | new_public | dummy_now | o + 3 | 2 | time | bigint | t | | | | 1 | | new_public | nowstamp | o + 4 | 3 | time | smallint | t | | | | 1 | | new_public | overflow_now | o (6 rows) alter schema new_public rename to public; diff --git a/tsl/test/expected/cagg_migrate.out b/tsl/test/expected/cagg_migrate.out index 63ca4456442..59fcaf162ac 100644 --- a/tsl/test/expected/cagg_migrate.out +++ b/tsl/test/expected/cagg_migrate.out @@ -157,7 +157,7 @@ UNION ALL WHERE (conditions."time" >= COALESCE((_timescaledb_functions.cagg_watermark(4))::integer, '-2147483648'::integer)) GROUP BY (public.time_bucket(168, conditions."time")); COPY _timescaledb_catalog.hypertable (id, schema_name, table_name, associated_schema_name, associated_table_prefix, num_dimensions, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size, compression_state, compressed_hypertable_id, status) FROM stdin; -COPY _timescaledb_catalog.dimension (id, hypertable_id, column_name, column_type, aligned, num_slices, partitioning_func_schema, partitioning_func, interval_length, compress_interval_length, integer_now_func_schema, integer_now_func) FROM stdin; +COPY _timescaledb_catalog.dimension (id, hypertable_id, column_name, column_type, aligned, num_slices, partitioning_func_schema, partitioning_func, interval_length, compress_interval_length, integer_now_func_schema, integer_now_func, type) FROM stdin; COPY 
_timescaledb_catalog.continuous_agg (mat_hypertable_id, raw_hypertable_id, parent_mat_hypertable_id, user_view_schema, user_view_name, partial_view_schema, partial_view_name, direct_view_schema, direct_view_name, materialized_only, finalized) FROM stdin; COPY _timescaledb_catalog.continuous_aggs_bucket_function (mat_hypertable_id, bucket_func, bucket_width, bucket_fixed_width) FROM stdin; COPY _timescaledb_catalog.continuous_aggs_invalidation_threshold (hypertable_id, watermark) FROM stdin; @@ -1026,7 +1026,7 @@ UNION ALL WHERE (conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(8)), '-infinity'::timestamp without time zone)) GROUP BY (public.time_bucket('7 days'::interval, conditions."time")); COPY _timescaledb_catalog.hypertable (id, schema_name, table_name, associated_schema_name, associated_table_prefix, num_dimensions, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size, compression_state, compressed_hypertable_id, status) FROM stdin; -COPY _timescaledb_catalog.dimension (id, hypertable_id, column_name, column_type, aligned, num_slices, partitioning_func_schema, partitioning_func, interval_length, compress_interval_length, integer_now_func_schema, integer_now_func) FROM stdin; +COPY _timescaledb_catalog.dimension (id, hypertable_id, column_name, column_type, aligned, num_slices, partitioning_func_schema, partitioning_func, interval_length, compress_interval_length, integer_now_func_schema, integer_now_func, type) FROM stdin; COPY _timescaledb_catalog.continuous_agg (mat_hypertable_id, raw_hypertable_id, parent_mat_hypertable_id, user_view_schema, user_view_name, partial_view_schema, partial_view_name, direct_view_schema, direct_view_name, materialized_only, finalized) FROM stdin; COPY _timescaledb_catalog.continuous_aggs_bucket_function (mat_hypertable_id, bucket_func, bucket_width, bucket_fixed_width) FROM stdin; COPY 
_timescaledb_catalog.continuous_aggs_invalidation_threshold (hypertable_id, watermark) FROM stdin; @@ -1872,7 +1872,7 @@ UNION ALL WHERE (conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(12)), '-infinity'::timestamp with time zone)) GROUP BY (public.time_bucket('7 days'::interval, conditions."time")); COPY _timescaledb_catalog.hypertable (id, schema_name, table_name, associated_schema_name, associated_table_prefix, num_dimensions, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size, compression_state, compressed_hypertable_id, status) FROM stdin; -COPY _timescaledb_catalog.dimension (id, hypertable_id, column_name, column_type, aligned, num_slices, partitioning_func_schema, partitioning_func, interval_length, compress_interval_length, integer_now_func_schema, integer_now_func) FROM stdin; +COPY _timescaledb_catalog.dimension (id, hypertable_id, column_name, column_type, aligned, num_slices, partitioning_func_schema, partitioning_func, interval_length, compress_interval_length, integer_now_func_schema, integer_now_func, type) FROM stdin; COPY _timescaledb_catalog.continuous_agg (mat_hypertable_id, raw_hypertable_id, parent_mat_hypertable_id, user_view_schema, user_view_name, partial_view_schema, partial_view_name, direct_view_schema, direct_view_name, materialized_only, finalized) FROM stdin; COPY _timescaledb_catalog.continuous_aggs_bucket_function (mat_hypertable_id, bucket_func, bucket_width, bucket_fixed_width) FROM stdin; COPY _timescaledb_catalog.continuous_aggs_invalidation_threshold (hypertable_id, watermark) FROM stdin; @@ -2708,7 +2708,7 @@ UNION ALL WHERE (conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp_without_timezone(_timescaledb_functions.cagg_watermark(8)), '-infinity'::timestamp without time zone)) GROUP BY (public.time_bucket('7 days'::interval, conditions."time")); COPY _timescaledb_catalog.hypertable (id, schema_name, table_name, associated_schema_name, 
associated_table_prefix, num_dimensions, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size, compression_state, compressed_hypertable_id, status) FROM stdin; -COPY _timescaledb_catalog.dimension (id, hypertable_id, column_name, column_type, aligned, num_slices, partitioning_func_schema, partitioning_func, interval_length, compress_interval_length, integer_now_func_schema, integer_now_func) FROM stdin; +COPY _timescaledb_catalog.dimension (id, hypertable_id, column_name, column_type, aligned, num_slices, partitioning_func_schema, partitioning_func, interval_length, compress_interval_length, integer_now_func_schema, integer_now_func, type) FROM stdin; COPY _timescaledb_catalog.continuous_agg (mat_hypertable_id, raw_hypertable_id, parent_mat_hypertable_id, user_view_schema, user_view_name, partial_view_schema, partial_view_name, direct_view_schema, direct_view_name, materialized_only, finalized) FROM stdin; COPY _timescaledb_catalog.continuous_aggs_bucket_function (mat_hypertable_id, bucket_func, bucket_width, bucket_fixed_width) FROM stdin; COPY _timescaledb_catalog.continuous_aggs_invalidation_threshold (hypertable_id, watermark) FROM stdin; @@ -2908,7 +2908,7 @@ UNION ALL WHERE (conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(12)), '-infinity'::timestamp with time zone)) GROUP BY (public.time_bucket('7 days'::interval, conditions."time")); COPY _timescaledb_catalog.hypertable (id, schema_name, table_name, associated_schema_name, associated_table_prefix, num_dimensions, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size, compression_state, compressed_hypertable_id, status) FROM stdin; -COPY _timescaledb_catalog.dimension (id, hypertable_id, column_name, column_type, aligned, num_slices, partitioning_func_schema, partitioning_func, interval_length, compress_interval_length, integer_now_func_schema, integer_now_func) FROM stdin; +COPY _timescaledb_catalog.dimension (id, 
hypertable_id, column_name, column_type, aligned, num_slices, partitioning_func_schema, partitioning_func, interval_length, compress_interval_length, integer_now_func_schema, integer_now_func, type) FROM stdin; COPY _timescaledb_catalog.continuous_agg (mat_hypertable_id, raw_hypertable_id, parent_mat_hypertable_id, user_view_schema, user_view_name, partial_view_schema, partial_view_name, direct_view_schema, direct_view_name, materialized_only, finalized) FROM stdin; COPY _timescaledb_catalog.continuous_aggs_bucket_function (mat_hypertable_id, bucket_func, bucket_width, bucket_fixed_width) FROM stdin; COPY _timescaledb_catalog.continuous_aggs_invalidation_threshold (hypertable_id, watermark) FROM stdin; diff --git a/tsl/test/expected/correlated_constraints.out b/tsl/test/expected/correlated_constraints.out new file mode 100644 index 00000000000..c673d1f983c --- /dev/null +++ b/tsl/test/expected/correlated_constraints.out @@ -0,0 +1,227 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set PREFIX 'EXPLAIN (costs off, timing off, summary off)' +CREATE OR REPLACE VIEW compressed_chunk_info_view AS +SELECT + h.schema_name AS hypertable_schema, + h.table_name AS hypertable_name, + c.schema_name || '.' 
|| c.table_name as chunk_name, + c.status as chunk_status +FROM + _timescaledb_catalog.hypertable h JOIN + _timescaledb_catalog.chunk c ON h.id = c.hypertable_id + LEFT JOIN _timescaledb_catalog.chunk comp +ON comp.id = c.compressed_chunk_id +; +CREATE TABLE sample_table ( + time TIMESTAMP WITH TIME ZONE NOT NULL, + sensor_id INTEGER NOT NULL, + cpu double precision null, + temperature double precision null, + name varchar(100) default 'this is a default string value' +); +SELECT * FROM create_hypertable('sample_table', 'time', + chunk_time_interval => INTERVAL '2 months'); +WARNING: column type "character varying" used for "name" does not follow best practices + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 1 | public | sample_table | t +(1 row) + +\set start_date '2022-01-28 01:09:53.583252+05:30' +INSERT INTO sample_table + SELECT + time + (INTERVAL '1 minute' * random()) AS time, + sensor_id, + random() AS cpu, + random()* 100 AS temperature + FROM + generate_series(:'start_date'::timestamptz - INTERVAL '1 months', + :'start_date'::timestamptz - INTERVAL '1 week', + INTERVAL '1 hour') AS g1(time), + generate_series(1, 8, 1 ) AS g2(sensor_id) + ORDER BY + time; +\set start_date '2023-03-17 17:51:11.322998+05:30' +-- insert into new chunks +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 12, 21.98, 33.123, 'new row1'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 12, 17.66, 13.875, 'new row1'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 13, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 9, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 14, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 15, 0.988, 33.123, 'new row3'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 16, 4.6554, 47, 'new row3'); +-- Non-int, date, 
timestamp cannot be specified as a correlated constraint for now +-- We could expand to FLOATs, NUMERICs later +\set ON_ERROR_STOP 0 +SELECT * FROM add_dimension('sample_table', 'name', correlated => true); +ERROR: invalid type for dimension "name" +\set ON_ERROR_STOP 1 +-- Specify a correlated constraint +SELECT * FROM add_dimension('sample_table', 'sensor_id', correlated => true); + dimension_id | schema_name | table_name | column_name | created +--------------+-------------+--------------+-------------+--------- + 2 | public | sample_table | sensor_id | t +(1 row) + +-- The above should add a dimension_slice entry with MIN/MAX int64 entries and all +-- existing chunks will point to this to indicate that there is no chunk exclusion +-- yet if this correlated constraint column is used in WHERE clauses +SELECT id AS dimension_id FROM _timescaledb_catalog.dimension WHERE type = 'C' \gset +-- should show MIN_INT/MAX_INT entries +SELECT * FROM _timescaledb_catalog.dimension WHERE type = 'C' AND id = :dimension_id; + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------+------ + 2 | 1 | sensor_id | integer | t | | | | 1 | | | | C +(1 row) + +SELECT id AS slice_id FROM _timescaledb_catalog.dimension_slice WHERE dimension_id = :dimension_id \gset +SELECT * FROM _timescaledb_catalog.chunk_constraint WHERE dimension_slice_id = :slice_id ORDER BY chunk_id; + chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name +----------+--------------------+-------------------+---------------------------- + 1 | 3 | _$CC_constraint_3 | + 2 | 3 | _$CC_constraint_3 | +(2 rows) + +-- A query using a WHERE 
clause on "sensor_id" column will scan all the chunks +:PREFIX SELECT * FROM sample_table WHERE sensor_id > 9; + QUERY PLAN +------------------------------------ + Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (sensor_id > 9) + -> Seq Scan on _hyper_1_2_chunk + Filter: (sensor_id > 9) +(5 rows) + +-- For the purposes of correlated constraints, a compressed chunk is considered as a +-- completed chunk. +-- enable compression +ALTER TABLE sample_table SET ( + timescaledb.compress +); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "sample_table" is set to "" +NOTICE: default order by for hypertable "sample_table" is set to "sensor_id DESC, "time" DESC" +-- +-- compress one chunk +SELECT show_chunks('sample_table') AS "CH_NAME" order by 1 limit 1 \gset +SELECT compress_chunk(:'CH_NAME'); +WARNING: no index on "sensor_id" found for correlated constraint on chunk "_hyper_1_1_chunk" + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +-- There should be an entry with min/max range computed for this chunk for this +-- "sensor_id" column +SELECT * FROM _timescaledb_catalog.dimension_slice WHERE dimension_id = :dimension_id; + id | dimension_id | range_start | range_end +----+--------------+----------------------+--------------------- + 3 | 2 | -9223372036854775808 | 9223372036854775807 + 4 | 2 | 1 | 9 +(2 rows) + +SELECT * FROM _timescaledb_catalog.chunk_constraint WHERE constraint_name LIKE '_$CC_con%' ORDER BY chunk_id; + chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name +----------+--------------------+-------------------+---------------------------- + 1 | 4 | _$CC_constraint_4 | + 2 | 3 | _$CC_constraint_3 | +(2 rows) + +-- 
check chunk compression status +SELECT chunk_status +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' AND chunk_name = :'CH_NAME'; + chunk_status +-------------- + 1 +(1 row) + +-- A query using a WHERE clause on "sensor_id" column will scan the proper chunk +-- due to chunk exclusion using correlated constraints ranges calculated above +:PREFIX SELECT * FROM sample_table WHERE sensor_id > 9; + QUERY PLAN +------------------------------ + Seq Scan on _hyper_1_2_chunk + Filter: (sensor_id > 9) +(2 rows) + +-- do update, this will change the status of the chunk +UPDATE sample_table SET name = 'updated row' WHERE cpu = 21.98 AND temperature = 33.123; +-- check chunk compression status +SELECT chunk_status +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' AND chunk_name = :'CH_NAME'; + chunk_status +-------------- + 9 +(1 row) + +-- The chunk_constraint should point to the MIN_INT/MAX_INT entry now +SELECT * FROM _timescaledb_catalog.chunk_constraint WHERE constraint_name LIKE '_$CC_con%' ORDER BY chunk_id; + chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name +----------+--------------------+-------------------+---------------------------- + 1 | 3 | _$CC_constraint_3 | + 2 | 3 | _$CC_constraint_3 | +(2 rows) + +-- A query using a WHERE clause on "sensor_id" column will go back to scanning all the chunks +-- along with an expensive DECOMPRESS on the first chunk +:PREFIX SELECT * FROM sample_table WHERE sensor_id > 9; + QUERY PLAN +--------------------------------------------------------- + Append + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + Vectorized Filter: (sensor_id > 9) + -> Seq Scan on compress_hyper_2_3_chunk + Filter: (_ts_meta_max_1 > 9) + -> Seq Scan on _hyper_1_1_chunk + Filter: (sensor_id > 9) + -> Seq Scan on _hyper_1_2_chunk + Filter: (sensor_id > 9) +(9 rows) + +-- recompress the partial chunk +SELECT compress_chunk(:'CH_NAME'); +WARNING: no index on "sensor_id" found for 
correlated constraint on chunk "_hyper_1_1_chunk" + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +-- check chunk compression status +SELECT chunk_status +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' AND chunk_name = :'CH_NAME'; + chunk_status +-------------- + 1 +(1 row) + +-- There should be an entry with min/max range computed for this chunk +SELECT * FROM _timescaledb_catalog.dimension_slice WHERE dimension_id = :dimension_id; + id | dimension_id | range_start | range_end +----+--------------+----------------------+--------------------- + 3 | 2 | -9223372036854775808 | 9223372036854775807 + 4 | 2 | 1 | 9 +(2 rows) + +SELECT * FROM _timescaledb_catalog.chunk_constraint WHERE constraint_name LIKE '_$CC_con%' ORDER BY chunk_id; + chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name +----------+--------------------+-------------------+---------------------------- + 1 | 4 | _$CC_constraint_4 | + 2 | 3 | _$CC_constraint_3 | +(2 rows) + +-- A query using a WHERE clause on "sensor_id" column will scan the proper chunk +-- due to chunk exclusion using correlated constraints ranges calculated above +:PREFIX SELECT * FROM sample_table WHERE sensor_id > 9; + QUERY PLAN +------------------------------ + Seq Scan on _hyper_1_2_chunk + Filter: (sensor_id > 9) +(2 rows) + +DROP TABLE sample_table; diff --git a/tsl/test/expected/tsl_tables.out b/tsl/test/expected/tsl_tables.out index 15b66dd99fe..eae45c1e281 100644 --- a/tsl/test/expected/tsl_tables.out +++ b/tsl/test/expected/tsl_tables.out @@ -189,10 +189,10 @@ select remove_retention_policy('test_table'); -- Test set_integer_now_func and add_retention_policy with -- hypertables that have integer time dimension select * from _timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | 
compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+--------------------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------ - 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | - 2 | 2 | time | bigint | t | | | | 1 | | | + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type +----+---------------+-------------+--------------------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------+------ + 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | | o + 2 | 2 | time | bigint | t | | | | 1 | | | | o (2 rows) \c :TEST_DBNAME :ROLE_SUPERUSER @@ -208,10 +208,10 @@ select set_integer_now_func('test_table_int', 'my_new_schema.dummy_now2'); \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER select * from _timescaledb_catalog.dimension; - id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func -----+---------------+-------------+--------------------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------ - 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | - 2 | 2 | time | bigint | t | | | | 1 | | my_new_schema | dummy_now2 + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func | type 
+----+---------------+-------------+--------------------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------+------ + 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | | o + 2 | 2 | time | bigint | t | | | | 1 | | my_new_schema | dummy_now2 | o (2 rows) SELECT * FROM _timescaledb_config.bgw_job WHERE proc_name = 'policy_retention' ORDER BY id; diff --git a/tsl/test/shared/expected/extension.out b/tsl/test/shared/expected/extension.out index 84806f9bc28..373ab082831 100644 --- a/tsl/test/shared/expected/extension.out +++ b/tsl/test/shared/expected/extension.out @@ -206,7 +206,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text add_compression_policy(regclass,"any",boolean,interval,timestamp with time zone,text,interval) add_continuous_aggregate_policy(regclass,"any","any",interval,boolean,timestamp with time zone,text) add_dimension(regclass,_timescaledb_internal.dimension_info,boolean) - add_dimension(regclass,name,integer,anyelement,regproc,boolean) + add_dimension(regclass,name,integer,anyelement,regproc,boolean,boolean) add_job(regproc,interval,jsonb,timestamp with time zone,boolean,regproc,boolean,text) add_reorder_policy(regclass,name,boolean,timestamp with time zone,text) add_retention_policy(regclass,"any",boolean,interval,timestamp with time zone,text,interval) diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index 2893addf957..ba5c4d260f5 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -96,7 +96,7 @@ endif(CMAKE_BUILD_TYPE MATCHES Debug) if((${PG_VERSION_MAJOR} GREATER_EQUAL "14")) if(CMAKE_BUILD_TYPE MATCHES Debug) list(APPEND TEST_FILES chunk_utils_internal.sql - compression_update_delete.sql) + compression_update_delete.sql correlated_constraints.sql) endif() list(APPEND TEST_FILES compression.sql compression_permissions.sql) endif() 
diff --git a/tsl/test/sql/correlated_constraints.sql b/tsl/test/sql/correlated_constraints.sql new file mode 100644 index 00000000000..c710edd1c0b --- /dev/null +++ b/tsl/test/sql/correlated_constraints.sql @@ -0,0 +1,142 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. + +\set PREFIX 'EXPLAIN (costs off, timing off, summary off)' + +CREATE OR REPLACE VIEW compressed_chunk_info_view AS +SELECT + h.schema_name AS hypertable_schema, + h.table_name AS hypertable_name, + c.schema_name || '.' || c.table_name as chunk_name, + c.status as chunk_status +FROM + _timescaledb_catalog.hypertable h JOIN + _timescaledb_catalog.chunk c ON h.id = c.hypertable_id + LEFT JOIN _timescaledb_catalog.chunk comp +ON comp.id = c.compressed_chunk_id +; + +CREATE TABLE sample_table ( + time TIMESTAMP WITH TIME ZONE NOT NULL, + sensor_id INTEGER NOT NULL, + cpu double precision null, + temperature double precision null, + name varchar(100) default 'this is a default string value' +); + +SELECT * FROM create_hypertable('sample_table', 'time', + chunk_time_interval => INTERVAL '2 months'); + +\set start_date '2022-01-28 01:09:53.583252+05:30' + +INSERT INTO sample_table + SELECT + time + (INTERVAL '1 minute' * random()) AS time, + sensor_id, + random() AS cpu, + random()* 100 AS temperature + FROM + generate_series(:'start_date'::timestamptz - INTERVAL '1 months', + :'start_date'::timestamptz - INTERVAL '1 week', + INTERVAL '1 hour') AS g1(time), + generate_series(1, 8, 1 ) AS g2(sensor_id) + ORDER BY + time; + +\set start_date '2023-03-17 17:51:11.322998+05:30' + +-- insert into new chunks +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 12, 21.98, 33.123, 'new row1'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 12, 17.66, 13.875, 'new row1'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 13, 21.98, 33.123, 
'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 9, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 14, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 15, 0.988, 33.123, 'new row3'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 16, 4.6554, 47, 'new row3'); + +-- Non-int, date, timestamp cannot be specified as a correlated constraint for now +-- We could expand to FLOATs, NUMERICs later +\set ON_ERROR_STOP 0 +SELECT * FROM add_dimension('sample_table', 'name', correlated => true); +\set ON_ERROR_STOP 1 + +-- Specify a correlated constraint +SELECT * FROM add_dimension('sample_table', 'sensor_id', correlated => true); + +-- The above should add a dimension_slice entry with MIN/MAX int64 entries and all +-- existing chunks will point to this to indicate that there is no chunk exclusion +-- yet if this correlated constraint column is used in WHERE clauses +SELECT id AS dimension_id FROM _timescaledb_catalog.dimension WHERE type = 'C' \gset +-- should show MIN_INT/MAX_INT entries +SELECT * FROM _timescaledb_catalog.dimension WHERE type = 'C' AND id = :dimension_id; +SELECT id AS slice_id FROM _timescaledb_catalog.dimension_slice WHERE dimension_id = :dimension_id \gset +SELECT * FROM _timescaledb_catalog.chunk_constraint WHERE dimension_slice_id = :slice_id ORDER BY chunk_id; + +-- A query using a WHERE clause on "sensor_id" column will scan all the chunks +:PREFIX SELECT * FROM sample_table WHERE sensor_id > 9; + +-- For the purposes of correlated constraints, a compressed chunk is considered as a +-- completed chunk. 
+ +-- enable compression +ALTER TABLE sample_table SET ( + timescaledb.compress +); + +-- +-- compress one chunk +SELECT show_chunks('sample_table') AS "CH_NAME" order by 1 limit 1 \gset +SELECT compress_chunk(:'CH_NAME'); + +-- There should be an entry with min/max range computed for this chunk for this +-- "sensor_id" column +SELECT * FROM _timescaledb_catalog.dimension_slice WHERE dimension_id = :dimension_id; +SELECT * FROM _timescaledb_catalog.chunk_constraint WHERE constraint_name LIKE '_$CC_con%' ORDER BY chunk_id; + +-- check chunk compression status +SELECT chunk_status +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' AND chunk_name = :'CH_NAME'; + +-- A query using a WHERE clause on "sensor_id" column will scan the proper chunk +-- due to chunk exclusion using correlated constraints ranges calculated above +:PREFIX SELECT * FROM sample_table WHERE sensor_id > 9; + +-- do update, this will change the status of the chunk +UPDATE sample_table SET name = 'updated row' WHERE cpu = 21.98 AND temperature = 33.123; + +-- check chunk compression status +SELECT chunk_status +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' AND chunk_name = :'CH_NAME'; + +-- The chunk_constraint should point to the MIN_INT/MAX_INT entry now +SELECT * FROM _timescaledb_catalog.chunk_constraint WHERE constraint_name LIKE '_$CC_con%' ORDER BY chunk_id; + +-- A query using a WHERE clause on "sensor_id" column will go back to scanning all the chunks +-- along with an expensive DECOMPRESS on the first chunk +:PREFIX SELECT * FROM sample_table WHERE sensor_id > 9; + +-- recompress the partial chunk +SELECT compress_chunk(:'CH_NAME'); + +-- check chunk compression status +SELECT chunk_status +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' AND chunk_name = :'CH_NAME'; + +-- There should be an entry with min/max range computed for this chunk +SELECT * FROM _timescaledb_catalog.dimension_slice WHERE dimension_id = 
:dimension_id; +SELECT * FROM _timescaledb_catalog.chunk_constraint WHERE constraint_name LIKE '_$CC_con%' ORDER BY chunk_id; + +-- A query using a WHERE clause on "sensor_id" column will scan the proper chunk +-- due to chunk exclusion using correlated constraints ranges calculated above +:PREFIX SELECT * FROM sample_table WHERE sensor_id > 9; + +-- Newly added chunks should also point to this MIN/MAX entry +\set start_date '2024-01-28 01:09:51.583252+05:30' +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 1, 9.6054, 78.999, 'new row4'); +SELECT * FROM _timescaledb_catalog.chunk_constraint WHERE dimension_slice_id = :slice_id ORDER BY chunk_id; + +DROP TABLE sample_table; diff --git a/tsl/test/sql/include/data/cagg_migrate_integer.sql.gz b/tsl/test/sql/include/data/cagg_migrate_integer.sql.gz index 3d8058d73b86ecbb3455666d37fb514de7b28b21..7c91c1208b4e12ded32f262c54cd0622e2baa97d 100644 GIT binary patch delta 1647 zcmV-#29Wvk4EPKPABzYGSfmbSkq91tPuoZkes+GvsxNV*6cKwV9KAb^(5nCaX1#XowPRx^fQm%vLoz!%`|Zqpv$J;kW9w*Qp~=y^frV}s z7cQIbOnuK`ocUge^cI4DuQ;32SBQC>&S-#!qX`PyRipIV;~5eCAk?NaB02s43ayF0{T0iAO4jnO4B(9u~7omm5`KS_XyS>$3d zy{23@a5u)$0Zk;O62Y8#dIm)_4zhUAzAX;gw{p<_HY*H*Ob&*Nd0`M_V(fT4`m5+I z7&#e_&Of4eUyxqCg$*2JzBs6V+Clgp2ld_@JbMpi_mFXsq;H9j<~Dp{w#&X#hg{tO z7;ay<6c{JjElroO8=R1^dgTmN0Gm|>u)%eOU?YrX9gOyDMhw`_#T}=1Dl%bqPJ|&z za-u!5mlaK~7Tz^sPO{Y2JHWrv4yvehV+S1Yzf|LT9@?TDE9Ng3-Za90yAzv-1R@Yt zJ?}^94w)#8?Bra&vIE)G#7;gsD?5;qP3+_Yxv~R!TxlmsY;qa+a}rROIeNq~pE;3{ zMB!m_qKouRfNYXW+nii4$@HrxlX6x+P&O4n5b(g<4(^C!Gxsi=N9AYgPL2GzWM1m! 
z_RWk;ZEm~%Y)52Le7bjk+J}dn-tumAk{up;_MEDp+dqD!Q@ioBpFi7j{GWfjRVb76 zOf1HWD(6XK^pL+#hhC6k9s1d5=IPK2Dy~7lI+2^A7hHWh^r8T)L7#0In)aeF z@zV!uYyp+;;}-fI^+!ho>#T3-rNkF7m%iw>`#P1hpzKt%bs4S74zEft+w4{7DScms-hC}#9I(Rv(4T)i zL4?YBjH0_67JA~*WvyawsKvfqEiI`PI;+A}u9KFBa-A(mXw)E~u{jBidLe${)kE^C zrqdspS4&E-Yj#k7_*ryN_*r*Q_*r*Q_*r*Q_>np&T%&m7&5JjFxDE<8>Y$>V-b<-} zln|BPtV?SB=;IePEfeyj(M39Qx+t7p(YZ||S!C4ZXFCl2DFet3QFJnLseTjCOLp58 z54(v2D|(?o(~Tb;gzP)>W|1pmJL^WIX({J+Fr!=y3z0v62m7HgNkR^*rXn>}qrHH_ zBJ9W*hTLJ^+g4ALp3_oCX>0kaNo%C1^|bX-)Icy@xB!C0Pbi-Ih^i zyR=EG24idk#%v0vbb`MOg@kyR2)hUAiL>br}%mt?H`!a)U? z;j)=W9m#}$5OR(J3!*1=0#|_rg^?~wkP?@AGk%rkIw_3ARf2f_QZ<_&09FIKJSlpb z*tr%fYh3t6{_jOTib<0(rumTFh9+yz`iZ;g7ay|EtgnnUTwl35dcbqeyoJ9AMdN^U z$gg~z2<&BE1i_#lCu@`!A?!bTKAm_tmB=Je9%3th(M^O>FZ-4@QOnAtmcj?$bMLw+ z>4Y3LilR8Ap{lwgmr+T5*;S^c{60au)OgFRwQ{XhOKXnu(jQj6Y~lM>m3Nm~%a&Fb z56mx8`bHcBna$WhZgY?|~SB+4hAXG6k&7!xU_ulJRRe&y{*s2k zdf#D}3>*$~7C^ySKtZ`MkdMUc-weYv_l;fi&A}et-`_jfJeTYYGRa{@SnS)k7 zUz?F75iyjAn94U?h*okl$lTN@2hZv4>SI=ay!8p`f9)=Udm<0O(QljDQnphtmOT16 z9lp0dS3HqR$jh8t1lOI$KO{LnI~|^&3m#BtuH*aLd$pK zk4jOzVDaa7ipBWWm9sb*pPrmpW5h)fg7cY^3)hFG1N6>19*xD4XV!SKQCML@j^6IIH*@v_VO-2HW@l}BBda^M(75}qZ=u`y zrOPIJ6W?Gbb{V zC_GF~w2{6IkZp2lmy_!ynSRw|QqJlJ%BCC$0v?#z!98(o=H93CsQ66XtB^le%v*W6 zc{?Q&o7=8G-4mG{?EVdGL%Vr zCKlsGmGh)FddT0WLoZ0N4t;Xq)kQB@^K|G171y9&p2&643$8vLdQkw@pij39b$d~m z`00Z+vVh8)xP^X4yQq(*Wv`^IOKFvMcx8I&X0Jp~>HA9bHno6pz;gRTfBx|V z5i062^6su#=!r)cm5RNg7W-ngRY@(;Sr)EhomF`#*4crCMg!5J04l26oy_EV# z2~pwAx}eq%KYmfuG9k|@x=3eE8-wGirxjxAH<-E)e_sdMe#0oT zUD%|RgE4jiW7Y*zI6;CjcL9Uzf+?IJ!QfrMeBCRf$TEo~Lo!J=@!k2%OEOk|;h+M{ zaM{$Oj$}fA2suZA1<{i_fh)&?+(;WGNQq0mDZgIjI?0X1Rf2f_LN%Km$kL&si9Ktv zn#P4k8!T8U>OlzQoZ_p*svS|+s=2Kb(P-$qIAaS6a2O$|$e-Vb!Y!-n6Q`tyEgJu%0L{&!c7Q zq+1xD#>@J%HvdvO%P$M&2qGs#><*$I$+9s|pZUwCA?Xq{14rShpoBLlK|cXn^)}Bu zC`!zKvBk~_zlK_M?Yj=rGy7k*KG|xNYJF<1ykd=9TtNa8qBy7;0L1kdGz8ZB4!dID zaF{a(3QhwG%DI7TBwqh!7^Zn>?3-_n4)EdO!O{MamX{quJSwZ=>Zs=nTi2Gdot&}cVaM6vz4f`| 
z8C*hM=G-B;;MD%<$i?~D;1pf*fI>SR-`L*6O`tjib2kX)13pB$hn4M&qr*3iFZG?R`L_;U{ER zeYScRu;9tXpf^6tnp@rRR22G?KsBEaRQ!lgqwBSL6a{Q5eqhu+8G}j!dIU+-w_q-O ny7>5BSXrBXTOn2J?^-x|v(wzp*$;$qjW_=X8rA)y`zincg*7>w diff --git a/tsl/test/sql/include/data/cagg_migrate_timestamp.sql.gz b/tsl/test/sql/include/data/cagg_migrate_timestamp.sql.gz index de12d6cf183a9350914f8e07709cb9b316f43b64..064edc777286083b594edd38d1dba9657ec0ed12 100644 GIT binary patch delta 604 zcmV-i0;Bz|4YLghABzYGe54L$u?S@Ze*k1Ppi3r2TjD#H3d)hhxUa;2xp}5GgP)Dm z2D;0Vs7yF1{{(qrRm2Pe|cFO z|K>GMEA|SHG?N(cVw;t>S~a)4#ol~@RcAn<2UZo=x2=}VEo8AbM*L;V(9I3Hf_34` z7my35?A-x-y5A+^9XHFWMj z%ob2;7q)4jTEdI#jxJe7U$@>ge^pslo2RX?e3V46$w2`kh6pD3QH^T=Ak$ufGw{+j z=!`=6V2%d~J$DEwrv{R7ef?Xjb$Y6_ny0ebK2wyo+Ek^qOo1?Nt*UOXTFyah-)HhW zCo*zoWRy&)nj6LN78SV?69yiwRg3!u{&l2FzE$f;h11u80K6@0E4Iy)f3ffx(^dby z_POAZY*-?wafje)So*!I@%2^z5>1#x;IzT~ww9PJcVn2)qh_X8RZ zJ|XS1)*Zuw7vp|+bd@x>O1av`QPhqCRe3&8)klQtKUIhyRSum)laD$VBTxxJ4}nBI q4d%k9@yGYvN+$hwLK?5jnQ&^=c6B#r9|&WHH~#~jJ#1H|E&u>1k{_}F delta 599 zcmV-d0;v774XzCcABzYG4;a~Hu?S@Ze?T%F+7jQhR8WW{#=Ru|%gr;j8T_oJKyz6V zHE9<+5krck9LA6;sfRICObsX^j9Q-Uxx8CKH6U-Y2s!#HDS}87i{T7q&xJ_&UvY%t z+eie%V&8+rs1pi>Ui9^YP%SDGTCrraE%UyP!uHRI6GaB1fJairGyT9KFHhs&f3@am z#a`NxW)cHVY_sxKtL7H8*t;&U>I?|(-m5D$8o~f3y`AkCF&BIVeEH5WxgLs&NefWZElm2430*olyuM z%<&+h=MDko)Ic(>uYYT`PEVCq^Hf&bXNuBRo2rzSDGTq6u>d+z*1Q@q4%?Z4W`&4}x+42;>rv-o1S%ouA&{u2!Cd$> l{`j6-$)w*-NaJ-m6Hcw#uI}dS17XbY=6^u(D}R_S006NgDk}g0 diff --git a/tsl/test/sql/include/data/cagg_migrate_timestamptz.sql.gz b/tsl/test/sql/include/data/cagg_migrate_timestamptz.sql.gz index 729abe1ce7afdddc5b4ff4be2b4376c363cf67d1..4b8cbcd80ada9cdcc5274205de6e637e45f6909f 100644 GIT binary patch delta 1685 zcmV;G25R}H4XX_ZABzYGl%x)3kq9h*AQDBk^Pb+FM5&WT;-DgMZFN1Z3|`18#?IO6 zlCW!L^!^v$*i8Vtv1FzNT(zBVJvTjJ=iK^-z<_$wtVtjMT))j(HTU+axSXnY!;db1nC zT1^ER`hD~4%%A^NH`@WwJ(!4ul0dYiRx5_e9|u`5sNZD=^*b@Be_dn-K_&)WZ6u5Ay z!zZen^cyko^*vx?`qU&mokcMbOI0l<;H9vLti6>KfH#r?@IYSO*$lO^5o&$0z$#YJ 
z^qvtt67A#twCDgIq(z7LFfN*1d)5u62I6QJ#MypN>My;gvi!W&9R@ppdO09AEpSN( z?(T$0EI0S8nZKP*upBTD0}EPdHw$kGUe&Ubipbgykg<-Pbnw=8fWdX_q~pA{1GF!- z6XpS)JN6Pg#H2>cPg0v2zL8MjQ7F?!txa%kM713rrY~IkWw}<;KJ18W8B7=)vecb> zZ0OXykF%`gQ14XW)0|p=k=L8I3p~@AZrY0uXI9W(?7>zUxnphroIE?T^d(7tZhrqB zO>GC$e*9>P@xT9Rm%sofF7%#ZK2kp>Cho?I)#u$bs?WP=R3EEp zRI8jC>f@do>du=+wO-Shb9MZ3$o%BsR=tf;aLNond`{*ULE0O&Y+Q0^^Xt^9sJGgCs7f|rrJrR7D8Ps~yIF*DWrN~iIZjq3LHGC>5QhsbD z5X?LiK;YokP!I)4hTks(Np2`|k@4b|v>XnMZ-Ymvji+!%fJfO1k6If~;fw%}x(%K$ zow8h6vst8nN{VeW+w_)JNL}%TfeNIENf#C|L`t|oJ_70B-!A32GU<>RX`>J+Hi@-h z*AeM)W+Zr0g)MuPG@BlP^agb4>^PP9&ZVNFBr)zN@pmL2oXEjXB(*u|;ZM}4+J=&r zfLzLMECDUlomeWT2vikDEzkB`-W;J8&^TR{9DS947D2>`#c+;_ED{Al} z5)RRSJD`Ct%LG5(h^*U>!d^!5uZOte4mzwW`P`M@J{}z8Y4Lnh2sa6d)3aV1ggdqy_*o?G<eTYf2_*;M<<5|$H#{!@`;pxl^ydKR@YUl>(=wp+WooYE(i|aW=?eY zj`GB6VJySTr`W|WVc^$Rv%GcS&ktSlZ?laQ6;5CW;t<^hx*WXIK9$`O4fC2BcL*-z z>c84Fxw;%&peb_*^!ve8>^r!|sR2US5`?k~5bBm7)aoE&HQuj3!HU^;P!kq|tH_jp z#qsF!;zAoC#)}Lbqw!|31p`Ry_THn>@FUVbX}t-ocs?2Q z#+PY((e-H(M(b&?D$fY3`iNNln*+(y#-R&n(oy$(3@Rb&5%8$5!Cd$>`S6Zg$z|Y1 fNRxFn7f!2r+T6_BJHnXb&Hn)A;P}l15iS4#32{-Z delta 1679 zcmV;A25|YS4WZB9v|2xezzpPhMTcKp&E>Yb5}MxD1k9o+^Ki%#29*D)!huHz%Q zf#ByAqc(Yk_r(%SaA<34Sj3!Q>M-IthSkO<^~jXruaqpXs-VtQ16}Doy*rAb@o9JxOm7I2 zn<_Gm`{vnMIRC3*wIZN z$Igd?KlAQ{k&EGA`~kiFjO1E>JKDs5>Wlxj@}KsQ|Bm@;{{PB>@Dx6%UeMp^(KD5^V~p;k9SZOrFb!y200 zGh(bn`*=SqI=}~6(IGxeil*0rbAzdgINCY!w%?QXOYf;FKW`0>!H!;k4v0+)T-t%V zJ0Vib%>ri{Zl@hB2Mol(0y*nu;Vr?dT6QuKS=s?I*0GZf-qH>*xQ?A{oR@Zh_LX+x zJis&0wXsJmYRX}fy3`Df#0rl>nHG{a!La4tf-n^aTslg1(owqr&qW)qJw#vvIYlY|J*_mV5B>lPh{rhTa zE1LG>M^lXd{ZFeBX3W>vWVCAPc&ZH_2>pccd72c#j}Oqg@Od6RA$*>l1@MWse6J66NazPM4E=_>gHBJs>gr|b&s@BJ>_AIP1mDYoF9xZ8 zBdaQ}C(YN?0Y6j5Pq(rrc5g3Pv>pcB822v+ebnjoYBolziqx)XG80;r{brS3vh}Rc zOLd_YdJi?EQLJ95p(p~y=@|Kxo$$r^Jp5i=iM}F&%0^Vo_EuzJyz4G zt#fK3pq2q zIbnR5i(DJ*``jc3r?G@6&G~RIpb+?bBKSTtsq?0BDhWqQD@RF%MM4tR@TsIq#j%w@ zFbymKfumbPQ4}Nve!ui3rJ*Wi#>-pMYB(^y4IZ^Np3)f+9(5}`T5UX~Ga@|NHh8|Y zt8!(@W|1m?8MaMbE3loIy5b816-W__&K+Wkl< 
zSER?ek?2VkcHBkUY<3`<7pD^6vs6}&B*tAN{*IJ`6D9gtN;%o#Pt>N`W|DS*QtB@3 z02Q8QNxIj7ZAx7b|h&XU7iQrc1fklcm zu~6tGUlNGbk}{!{N;KE8?prAC@60qsAh@U8CAk8czwc0%qnYQTWoqfyYowXPI4Ray ze7%KBYg+of6j*r1S@gid;^J(*l}oEv`i+iox0` ztIPfcnlO(*haX+HzJsfp8X(jyL8!X`p=}95TOUNC#)tJMS~36DY06@B-I%dB8eU$0 zTU;ycz?t3&Gd_?*uy*q{#&&U1l=rU_> zbw!$nAwLaP^%-H+9uaGJJ0N}9cytbJIqIB`KqW>!1RnKtm