From 4ebbd85b6831d40ddcf2d99b16d052357f22321f Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Fri, 3 May 2024 19:59:22 +0200
Subject: [PATCH] Allow DELETE on compressed chunks without decompression

When the constraints of a DELETE on a compressed chunk fully cover the
batches, we can optimize the DELETE to work directly on the compressed
batches and skip the expensive decompression part. This optimization is
disabled when we detect any JOINs.
---
 .unreleased/pr_6882                           |   1 +
 src/guc.c                                     |  12 ++
 src/guc.h                                     |   1 +
 src/nodes/chunk_dispatch/chunk_dispatch.h     |   1 +
 src/nodes/hypertable_modify.c                 |   3 +
 src/nodes/hypertable_modify.h                 |   1 +
 tsl/src/compression/compression.h             |   4 +
 tsl/src/compression/compression_dml.c         | 122 +++++++++++--
 .../expected/compression_update_delete.out    | 121 +++++++++++--
 tsl/test/expected/foreign_keys.out            |  38 +++-
 tsl/test/shared/expected/compression_dml.out  | 167 ++++++++++++++++++
 .../shared/expected/decompress_tracking.out   |  28 +++
 tsl/test/shared/sql/compression_dml.sql       |  63 +++++++
 tsl/test/shared/sql/decompress_tracking.sql   |   7 +
 tsl/test/sql/compression_update_delete.sql    |  23 ++-
 tsl/test/sql/foreign_keys.sql                 |  15 +-
 16 files changed, 570 insertions(+), 37 deletions(-)
 create mode 100644 .unreleased/pr_6882

diff --git a/.unreleased/pr_6882 b/.unreleased/pr_6882
new file mode 100644
index 00000000000..f6a881a54c5
--- /dev/null
+++ b/.unreleased/pr_6882
@@ -0,0 +1 @@
+Implements: #6882 Allow DELETE on compressed chunks without decompression
diff --git a/src/guc.c b/src/guc.c
index be5008f5ea2..9becb10dd1b 100644
--- a/src/guc.c
+++ b/src/guc.c
@@ -69,6 +69,7 @@ bool ts_guc_enable_foreign_key_propagation = true;
 TSDLLEXPORT bool ts_guc_enable_cagg_watermark_constify = true;
 TSDLLEXPORT int ts_guc_cagg_max_individual_materializations = 10;
 bool ts_guc_enable_osm_reads = true;
+TSDLLEXPORT bool ts_guc_enable_compressed_direct_batch_delete = true;
 TSDLLEXPORT bool ts_guc_enable_dml_decompression = true;
 TSDLLEXPORT bool ts_guc_enable_dml_decompression_tuple_filtering = true;
 TSDLLEXPORT int ts_guc_max_tuples_decompressed_per_dml = 100000;
@@ -463,6 +464,17 @@ _guc_init(void)
 							 NULL,
 							 NULL);
 
+	DefineCustomBoolVariable(MAKE_EXTOPTION("enable_compressed_direct_batch_delete"),
+							 "Enable direct deletion of compressed batches",
+							 "Enable direct batch deletion in compressed chunks",
+							 &ts_guc_enable_compressed_direct_batch_delete,
+							 true,
+							 PGC_USERSET,
+							 0,
+							 NULL,
+							 NULL,
+							 NULL);
+
 	DefineCustomIntVariable(MAKE_EXTOPTION("max_tuples_decompressed_per_dml_transaction"),
 							"The max number of tuples that can be decompressed during an "
 							"INSERT, UPDATE, or DELETE.",
diff --git a/src/guc.h b/src/guc.h
index 18f69f20ac6..f8e0f15e481 100644
--- a/src/guc.h
+++ b/src/guc.h
@@ -31,6 +31,7 @@ extern TSDLLEXPORT bool ts_guc_enable_cagg_watermark_constify;
 extern bool ts_guc_enable_osm_reads;
 extern TSDLLEXPORT bool ts_guc_enable_dml_decompression;
 extern TSDLLEXPORT bool ts_guc_enable_dml_decompression_tuple_filtering;
+extern TSDLLEXPORT bool ts_guc_enable_compressed_direct_batch_delete;
 extern TSDLLEXPORT int ts_guc_max_tuples_decompressed_per_dml;
 extern TSDLLEXPORT bool ts_guc_enable_transparent_decompression;
 extern TSDLLEXPORT bool ts_guc_enable_compression_wal_markers;
diff --git a/src/nodes/chunk_dispatch/chunk_dispatch.h b/src/nodes/chunk_dispatch/chunk_dispatch.h
index cc39dde399b..e44af8eda7f 100644
--- a/src/nodes/chunk_dispatch/chunk_dispatch.h
+++ b/src/nodes/chunk_dispatch/chunk_dispatch.h
@@ -74,6 +74,7 @@ typedef struct ChunkDispatchState
 	ResultRelInfo *rri;
 	/* flag to represent dropped attributes */
 	bool is_dropped_attr_exists;
+	int64 batches_deleted;
 	int64 batches_filtered;
 	int64 batches_decompressed;
 	int64 tuples_decompressed;
diff --git a/src/nodes/hypertable_modify.c b/src/nodes/hypertable_modify.c
index 4cfc25e447f..ef8a91cee70 100644
--- a/src/nodes/hypertable_modify.c
+++ b/src/nodes/hypertable_modify.c
@@ -240,6 +240,7 @@ hypertable_modify_explain(CustomScanState *node, List *ancestors, ExplainState *
 	foreach (lc, chunk_dispatch_states)
 	{
 		ChunkDispatchState *cds = (ChunkDispatchState *) lfirst(lc);
+		state->batches_deleted += cds->batches_deleted;
 		state->batches_filtered += cds->batches_filtered;
 		state->batches_decompressed += cds->batches_decompressed;
 		state->tuples_decompressed += cds->tuples_decompressed;
@@ -251,6 +252,8 @@ hypertable_modify_explain(CustomScanState *node, List *ancestors, ExplainState *
 		ExplainPropertyInteger("Batches decompressed", NULL, state->batches_decompressed, es);
 	if (state->tuples_decompressed > 0)
 		ExplainPropertyInteger("Tuples decompressed", NULL, state->tuples_decompressed, es);
+	if (state->batches_deleted > 0)
+		ExplainPropertyInteger("Batches deleted", NULL, state->batches_deleted, es);
 }
 
 static CustomExecMethods hypertable_modify_state_methods = {
diff --git a/src/nodes/hypertable_modify.h b/src/nodes/hypertable_modify.h
index 64469c3f5af..9b1fecc87fe 100644
--- a/src/nodes/hypertable_modify.h
+++ b/src/nodes/hypertable_modify.h
@@ -32,6 +32,7 @@ typedef struct HypertableModifyState
 	int64 tuples_decompressed;
 	int64 batches_decompressed;
 	int64 batches_filtered;
+	int64 batches_deleted;
 } HypertableModifyState;
 
 extern void ts_hypertable_modify_fixup_tlist(Plan *plan);
diff --git a/tsl/src/compression/compression.h b/tsl/src/compression/compression.h
index ec0d687f668..b2aa151dd73 100644
--- a/tsl/src/compression/compression.h
+++ b/tsl/src/compression/compression.h
@@ -138,6 +138,8 @@ typedef struct RowDecompressor
 	CommandId mycid;
 	BulkInsertState bistate;
 
+	bool delete_only;
+
 	Datum *compressed_datums;
 	bool *compressed_is_nulls;
 
@@ -147,6 +149,7 @@ typedef struct RowDecompressor
 	MemoryContext per_compressed_row_ctx;
 	int64 batches_decompressed;
 	int64 tuples_decompressed;
+	int64 batches_deleted;
 
 	TupleTableSlot **decompressed_slots;
 	int unprocessed_tuples;
@@ -412,6 +415,7 @@ const CompressionAlgorithmDefinition *algorithm_definition(CompressionAlgorithm
 struct decompress_batches_stats
 {
+	int64 batches_deleted;
 	int64 batches_filtered;
 	int64 batches_decompressed;
 	int64 tuples_decompressed;
diff --git a/tsl/src/compression/compression_dml.c b/tsl/src/compression/compression_dml.c
index 0dad0c71c67..54fa0978f3d 100644
--- a/tsl/src/compression/compression_dml.c
+++ b/tsl/src/compression/compression_dml.c
@@ -35,12 +35,11 @@ decompress_batches_indexscan(Relation in_rel, Relation out_rel, Relation index_r
 							 ScanKeyData *heap_scankeys, int num_heap_scankeys,
 							 ScanKeyData *mem_scankeys, int num_mem_scankeys,
 							 tuple_filtering_constraints *constraints, bool *skip_current_tuple,
-							 Bitmapset *null_columns, List *is_nulls);
-static struct decompress_batches_stats
-decompress_batches_seqscan(Relation in_rel, Relation out_rel, Snapshot snapshot,
-						   ScanKeyData *scankeys, int num_scankeys, ScanKeyData *mem_scankeys,
-						   int num_mem_scankeys, tuple_filtering_constraints *constraints,
-						   bool *skip_current_tuple, Bitmapset *null_columns, List *is_nulls);
+							 bool delete_only, Bitmapset *null_columns, List *is_nulls);
+static struct decompress_batches_stats decompress_batches_seqscan(
+	Relation in_rel, Relation out_rel, Snapshot snapshot, ScanKeyData *scankeys, int num_scankeys,
+	ScanKeyData *mem_scankeys, int num_mem_scankeys, tuple_filtering_constraints *constraints,
+	bool *skip_current_tuple, bool delete_only, Bitmapset *null_columns, List *is_nulls);
 
 static bool batch_matches(RowDecompressor *decompressor, ScanKeyData *scankeys, int num_scankeys,
 						  tuple_filtering_constraints *constraints, bool *skip_current_tuple);
@@ -60,6 +59,9 @@ static void report_error(TM_Result result);
 
 static bool key_column_is_null(tuple_filtering_constraints *constraints, Relation chunk_rel,
 							   Oid ht_relid, TupleTableSlot *slot);
+static bool can_delete_without_decompression(HypertableModifyState *ht_state,
+											 CompressionSettings *settings, Chunk *chunk,
+											 List *predicates);
 
 void
 decompress_batches_for_insert(const ChunkInsertState *cis, TupleTableSlot *slot)
@@ -167,6 +169,7 @@ decompress_batches_for_insert(const ChunkInsertState *cis, TupleTableSlot *slot)
 											 num_mem_scankeys,
 											 constraints,
 											 &skip_current_tuple,
+											 false,
 											 NULL, /* no null column check for non-segmentby
 													  columns */
 											 NIL);
@@ -193,6 +196,7 @@ decompress_batches_for_insert(const ChunkInsertState *cis, TupleTableSlot *slot)
 										   num_mem_scankeys,
 										   constraints,
 										   &skip_current_tuple,
+										   false,
 										   null_columns,
 										   NIL);
 	}
@@ -203,6 +207,7 @@ decompress_batches_for_insert(const ChunkInsertState *cis, TupleTableSlot *slot)
 		cis->cds->skip_current_tuple = true;
 	}
 
+	cis->cds->batches_deleted += stats.batches_deleted;
 	cis->cds->batches_filtered += stats.batches_filtered;
 	cis->cds->batches_decompressed += stats.batches_decompressed;
 	cis->cds->tuples_decompressed += stats.tuples_decompressed;
@@ -223,7 +228,7 @@ decompress_batches_for_insert(const ChunkInsertState *cis, TupleTableSlot *slot)
  */
 static bool
 decompress_batches_for_update_delete(HypertableModifyState *ht_state, Chunk *chunk,
-									 List *predicates, EState *estate)
+									 List *predicates, EState *estate, bool has_joins)
 {
 	/* process each chunk with its corresponding predicates */
@@ -248,6 +253,8 @@ decompress_batches_for_update_delete(HypertableModifyState *ht_state, Chunk *chu
 	comp_chunk = ts_chunk_get_by_id(chunk->fd.compressed_chunk_id, true);
 	CompressionSettings *settings = ts_compression_settings_get(comp_chunk->table_id);
+	bool delete_only = ht_state->mt->operation == CMD_DELETE && !has_joins &&
+					   can_delete_without_decompression(ht_state, settings, chunk, predicates);
 
 	process_predicates(chunk,
 					   settings,
@@ -289,6 +296,7 @@ decompress_batches_for_update_delete(HypertableModifyState *ht_state, Chunk *chu
 												 num_mem_scankeys,
 												 NULL,
 												 NULL,
+												 delete_only,
 												 null_columns,
 												 is_null);
 		/* close the selected index */
@@ -305,6 +313,7 @@ decompress_batches_for_update_delete(HypertableModifyState *ht_state, Chunk *chu
 											   num_mem_scankeys,
 											   NULL,
 											   NULL,
+											   delete_only,
 											   null_columns,
 											   is_null);
 	}
@@ -329,6 +338,7 @@ decompress_batches_for_update_delete(HypertableModifyState *ht_state, Chunk *chu
 		filter = lfirst(lc);
 		pfree(filter);
 	}
+	ht_state->batches_deleted += stats.batches_deleted;
 	ht_state->batches_filtered += stats.batches_filtered;
 	ht_state->batches_decompressed += stats.batches_decompressed;
 	ht_state->tuples_decompressed += stats.tuples_decompressed;
@@ -351,7 +361,7 @@ decompress_batches_indexscan(Relation in_rel, Relation out_rel, Relation index_r
 							 ScanKeyData *heap_scankeys, int num_heap_scankeys,
 							 ScanKeyData *mem_scankeys, int num_mem_scankeys,
 							 tuple_filtering_constraints *constraints, bool *skip_current_tuple,
-							 Bitmapset *null_columns, List *is_nulls)
+							 bool delete_only, Bitmapset *null_columns, List *is_nulls)
 {
 	HeapTuple compressed_tuple;
 	RowDecompressor decompressor;
@@ -403,7 +413,7 @@ decompress_batches_indexscan(Relation in_rel, Relation out_rel, Relation index_r
 			valid = true;
 			for (; attrno >= 0; attrno = bms_next_member(null_columns, attrno))
 			{
-				is_null_condition = list_nth_int(is_nulls, pos);
+				is_null_condition = is_nulls && list_nth_int(is_nulls, pos);
 				seg_col_is_null = slot_attisnull(slot, attrno);
 				if ((seg_col_is_null && !is_null_condition) || (!seg_col_is_null && is_null_condition))
 				{
@@ -426,6 +436,8 @@ decompress_batches_indexscan(Relation in_rel, Relation out_rel, Relation index_r
 		if (!decompressor_initialized)
 		{
 			decompressor = build_decompressor(in_rel, out_rel);
+			decompressor.delete_only = delete_only;
+
 			decompressor_initialized = true;
 		}
@@ -475,8 +487,15 @@ decompress_batches_indexscan(Relation in_rel, Relation out_rel, Relation index_r
 			report_error(result);
 			return stats;
 		}
-		stats.tuples_decompressed += row_decompressor_decompress_row_to_table(&decompressor);
-		stats.batches_decompressed++;
+		if (decompressor.delete_only)
+		{
+			stats.batches_deleted++;
+		}
+		else
+		{
+			stats.tuples_decompressed += row_decompressor_decompress_row_to_table(&decompressor);
+			stats.batches_decompressed++;
+		}
 		write_logical_replication_msg_decompression_end();
 	}
 
@@ -515,7 +534,8 @@
 static struct decompress_batches_stats
 decompress_batches_seqscan(Relation in_rel, Relation out_rel, Snapshot snapshot,
 						   ScanKeyData *scankeys, int num_scankeys, ScanKeyData *mem_scankeys,
 						   int num_mem_scankeys, tuple_filtering_constraints *constraints,
-						   bool *skip_current_tuple, Bitmapset *null_columns, List *is_nulls)
+						   bool *skip_current_tuple, bool delete_only, Bitmapset *null_columns,
+						   List *is_nulls)
 {
 	RowDecompressor decompressor;
 	bool decompressor_initialized = false;
@@ -568,6 +588,7 @@ decompress_batches_seqscan(Relation in_rel, Relation out_rel, Snapshot snapshot,
 		if (!decompressor_initialized)
 		{
 			decompressor = build_decompressor(in_rel, out_rel);
+			decompressor.delete_only = delete_only;
 			decompressor_initialized = true;
 		}
@@ -612,8 +633,15 @@ decompress_batches_seqscan(Relation in_rel, Relation out_rel, Snapshot snapshot,
 			table_endscan(scan);
 			report_error(result);
 		}
-		stats.tuples_decompressed += row_decompressor_decompress_row_to_table(&decompressor);
-		stats.batches_decompressed++;
+		if (decompressor.delete_only)
+		{
+			stats.batches_deleted++;
+		}
+		else
+		{
+			stats.tuples_decompressed += row_decompressor_decompress_row_to_table(&decompressor);
+			stats.batches_decompressed++;
+		}
 		write_logical_replication_msg_decompression_end();
 	}
 	if (scankeys)
@@ -690,6 +718,7 @@ struct decompress_chunk_context
 	HypertableModifyState *ht_state;
 	/* indicates decompression actually occurred */
 	bool batches_decompressed;
+	bool has_joins;
 };
 
 static bool decompress_chunk_walker(PlanState *ps, struct decompress_chunk_context *ctx);
@@ -751,6 +780,13 @@ decompress_chunk_walker(PlanState *ps, struct decompress_chunk_context *ctx)
 			needs_decompression = true;
 			break;
 		}
+		case T_NestLoopState:
+		case T_MergeJoinState:
+		case T_HashJoinState:
+		{
+			ctx->has_joins = true;
+			break;
+		}
 		default:
 			break;
 	}
@@ -777,7 +813,8 @@ decompress_chunk_walker(PlanState *ps, struct decompress_chunk_context *ctx)
 			batches_decompressed = decompress_batches_for_update_delete(ctx->ht_state,
 																		current_chunk,
 																		predicates,
-																		ps->state);
+																		ps->state,
+																		ctx->has_joins);
 			ctx->batches_decompressed |= batches_decompressed;
 
 			/* This is a workaround specifically for bitmap heap scans:
@@ -1415,3 +1452,58 @@ key_column_is_null(tuple_filtering_constraints *constraints, Relation chunk_rel,
 
 	return false;
 }
+
+static bool
+can_delete_without_decompression(HypertableModifyState *ht_state, CompressionSettings *settings,
+								 Chunk *chunk, List *predicates)
+{
+	ListCell *lc;
+
+	if (!ts_guc_enable_compressed_direct_batch_delete)
+		return false;
+
+	/*
+	 * If there is a RETURNING clause we skip the optimization to delete compressed batches directly
+	 */
+	if (ht_state->mt->returningLists)
+		return false;
+
+	/*
+	 * If there are any DELETE row triggers on the hypertable we skip the optimization
+	 * to delete compressed batches directly.
+	 */
+	ModifyTableState *ps =
+		linitial_node(ModifyTableState, castNode(CustomScanState, ht_state)->custom_ps);
+	if (ps->rootResultRelInfo->ri_TrigDesc)
+	{
+		TriggerDesc *trigdesc = ps->rootResultRelInfo->ri_TrigDesc;
+		if (trigdesc->trig_delete_before_row || trigdesc->trig_delete_after_row ||
+			trigdesc->trig_delete_instead_row)
+		{
+			return false;
+		}
+	}
+
+	foreach (lc, predicates)
+	{
+		Node *node = lfirst(lc);
+		Var *var;
+		Expr *arg_value;
+		Oid opno;
+
+		if (ts_extract_expr_args((Expr *) node, &var, &arg_value, &opno, NULL))
+		{
+			if (!IsA(arg_value, Const))
+			{
+				return false;
+			}
+			char *column_name = get_attname(chunk->table_id, var->varattno, false);
+			if (ts_array_is_member(settings->fd.segmentby, column_name))
+			{
+				continue;
+			}
+		}
+		return false;
+	}
+	return true;
+}
diff --git a/tsl/test/expected/compression_update_delete.out b/tsl/test/expected/compression_update_delete.out
index bd73b73c32c..b827ee5819a 100644
--- a/tsl/test/expected/compression_update_delete.out
+++ b/tsl/test/expected/compression_update_delete.out
@@ -980,7 +980,7 @@ FROM compressed_chunk_info_view
 WHERE hypertable_name = 'sample_table' ORDER BY chunk_name;
  chunk_status |     CHUNK_NAME     
 --------------+--------------------
-            9 | _hyper_13_27_chunk
+            1 | _hyper_13_27_chunk
             1 | _hyper_13_28_chunk
 (2 rows)
 
@@ -1143,7 +1143,7 @@ WHERE hypertable_name = 'sample_table' ORDER BY chunk_name;
  chunk_status |     CHUNK_NAME     
 --------------+--------------------
             1 | _hyper_15_31_chunk
-            9 | _hyper_15_32_chunk
+            1 | _hyper_15_32_chunk
 (2 rows)
 
 -- get rowcount from compressed chunks where device_id IS NULL
@@ -2960,6 +2960,10 @@ INSERT INTO test_pushdown SELECT '2020-01-01', 'b';
 INSERT INTO test_pushdown SELECT '2020-01-01 05:00', 'c';
 CREATE TABLE devices(device text);
 INSERT INTO devices VALUES ('a'), ('b'), ('c');
+CREATE TABLE devices2(device text);
+INSERT INTO devices2 VALUES ('d'), ('e'), ('f');
+CREATE TABLE devices3(device text);
+INSERT INTO devices3 VALUES ('b'), ('d'), ('g');
 ALTER TABLE test_pushdown SET (timescaledb.compress, timescaledb.compress_segmentby='device');
 NOTICE:  default order by for hypertable "test_pushdown" is set to ""time" DESC"
 SELECT compress_chunk(show_chunks('test_pushdown'));
@@ -2969,6 +2973,7 @@ SELECT compress_chunk(show_chunks('test_pushdown'));
 (1 row)
 
 -- 3 batch decompressions means pushdown is not working so we expect less than 3 for all these queries
+SET timescaledb.enable_compressed_direct_batch_delete TO false;
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'a' = device; ROLLBACK;
                                      QUERY PLAN                                      
 ------------------------------------------------------------------------------------
@@ -3065,6 +3070,7 @@ BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'b' <= device; ROLLBACK;
    Filter: ('b'::text <= device)
 (7 rows)
 
+RESET timescaledb.enable_compressed_direct_batch_delete;
 -- cant pushdown OR atm
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = 'a' OR device = 'b'; ROLLBACK;
                                      QUERY PLAN                                      
@@ -3107,8 +3113,10 @@ BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = substring(CURRENT_USER,
    Filter: (device = ("substring"((CURRENT_USER)::text, (length((CURRENT_USER)::text) + 1)) || 'c'::text))
 (9 rows)
 
+-- JOIN tests
 -- no filtering in decompression
-BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.device; ROLLBACK;
+SET timescaledb.enable_compressed_direct_batch_delete TO false;
+BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices3 d WHERE p.device=d.device; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK;
                                      QUERY PLAN                                      
 ------------------------------------------------------------------------------------
  Custom Scan (HypertableModify) (actual rows=0 loops=1)
@@ -3116,20 +3124,54 @@ BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.dev
    Tuples decompressed: 3
    ->  Delete on test_pushdown p (actual rows=0 loops=1)
          Delete on _hyper_39_79_chunk p_1
-         ->  Merge Join (actual rows=3 loops=1)
+         ->  Merge Join (actual rows=1 loops=1)
                Merge Cond: (p_1.device = d.device)
                ->  Sort (actual rows=3 loops=1)
                      Sort Key: p_1.device
                      Sort Method: quicksort
                      ->  Seq Scan on _hyper_39_79_chunk p_1 (actual rows=3 loops=1)
+               ->  Sort (actual rows=2 loops=1)
+                     Sort Key: d.device
+                     Sort Method: quicksort
+                     ->  Seq Scan on devices3 d (actual rows=3 loops=1)
+(15 rows)
+
+             time             | device 
+------------------------------+--------
+ Wed Jan 01 00:00:00 2020 PST | a
+ Wed Jan 01 05:00:00 2020 PST | c
+(2 rows)
+
+RESET timescaledb.enable_compressed_direct_batch_delete;
+BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices3 d WHERE p.device=d.device; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK;
+                                     QUERY PLAN                                      
+------------------------------------------------------------------------------------
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches decompressed: 3
+   Tuples decompressed: 3
+   ->  Delete on test_pushdown p (actual rows=0 loops=1)
+         Delete on _hyper_39_79_chunk p_1
+         ->  Merge Join (actual rows=1 loops=1)
+               Merge Cond: (p_1.device = d.device)
                ->  Sort (actual rows=3 loops=1)
+                     Sort Key: p_1.device
+                     Sort Method: quicksort
+                     ->  Seq Scan on _hyper_39_79_chunk p_1 (actual rows=3 loops=1)
+               ->  Sort (actual rows=2 loops=1)
                      Sort Key: d.device
                      Sort Method: quicksort
-                     ->  Seq Scan on devices d (actual rows=3 loops=1)
+                     ->  Seq Scan on devices3 d (actual rows=3 loops=1)
 (15 rows)
 
+             time             | device 
+------------------------------+--------
+ Wed Jan 01 00:00:00 2020 PST | a
+ Wed Jan 01 05:00:00 2020 PST | c
+(2 rows)
+
 -- can filter in decompression even before executing join
-BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.device AND d.device ='b' ; ROLLBACK;
+SET timescaledb.enable_compressed_direct_batch_delete TO false;
+BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.device AND d.device ='b'; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK;
                                      QUERY PLAN                                      
 ------------------------------------------------------------------------------------
  Custom Scan (HypertableModify) (actual rows=0 loops=1)
@@ -3146,8 +3188,39 @@ BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.dev
    Filter: (device = 'b'::text)
 (12 rows)
 
+             time             | device 
+------------------------------+--------
+ Wed Jan 01 00:00:00 2020 PST | a
+ Wed Jan 01 05:00:00 2020 PST | c
+(2 rows)
+
+RESET timescaledb.enable_compressed_direct_batch_delete;
+BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.device AND d.device ='b'; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK;
+                                     QUERY PLAN                                      
+------------------------------------------------------------------------------------
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches decompressed: 1
+   Tuples decompressed: 1
+   ->  Delete on test_pushdown p (actual rows=0 loops=1)
+         Delete on _hyper_39_79_chunk p_1
+         ->  Nested Loop (actual rows=1 loops=1)
+               ->  Seq Scan on devices d (actual rows=1 loops=1)
+                     Filter: (device = 'b'::text)
+                     Rows Removed by Filter: 2
+               ->  Materialize (actual rows=1 loops=1)
+                     ->  Seq Scan on _hyper_39_79_chunk p_1 (actual rows=1 loops=1)
+                           Filter: (device = 'b'::text)
+(12 rows)
+
+             time             | device 
+------------------------------+--------
+ Wed Jan 01 00:00:00 2020 PST | a
+ Wed Jan 01 05:00:00 2020 PST | c
+(2 rows)
+
 -- test prepared statement
 PREPARE q1(text) AS DELETE FROM test_pushdown WHERE device = $1;
+SET timescaledb.enable_compressed_direct_batch_delete TO false;
 BEGIN; :EXPLAIN EXECUTE q1('a'); ROLLBACK;
                                      QUERY PLAN                                      
 ------------------------------------------------------------------------------------
 Custom Scan (HypertableModify) (actual rows=0 loops=1)
@@ -3160,30 +3233,50 @@ BEGIN; :EXPLAIN EXECUTE q1('a'); ROLLBACK;
    Filter: (device = 'a'::text)
 (7 rows)
 
+RESET timescaledb.enable_compressed_direct_batch_delete;
+BEGIN; :EXPLAIN EXECUTE q1('a'); ROLLBACK;
+                                     QUERY PLAN                                      
+------------------------------------------------------------------------------------
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches deleted: 1
+   ->  Delete on test_pushdown (actual rows=0 loops=1)
+         Delete on _hyper_39_79_chunk test_pushdown_1
+         ->  Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1)
+               Filter: (device = 'a'::text)
+(6 rows)
+
+BEGIN; :EXPLAIN EXECUTE q1('not here'); ROLLBACK;
+                                     QUERY PLAN                                      
+------------------------------------------------------------------------------------
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   ->  Delete on test_pushdown (actual rows=0 loops=1)
+         Delete on _hyper_39_79_chunk test_pushdown_1
+         ->  Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1)
+               Filter: (device = 'not here'::text)
+(5 rows)
+
 -- test arrayop pushdown less than 3 decompressions are expected for successful pushdown
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device IN ('a','d'); ROLLBACK;
                                      QUERY PLAN                                      
 ------------------------------------------------------------------------------------
  Custom Scan (HypertableModify) (actual rows=0 loops=1)
-   Batches decompressed: 1
-   Tuples decompressed: 1
+   Batches deleted: 1
    ->  Delete on test_pushdown (actual rows=0 loops=1)
          Delete on _hyper_39_79_chunk test_pushdown_1
-         ->  Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1)
+         ->  Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1)
                Filter: (device = ANY ('{a,d}'::text[]))
-(7 rows)
+(6 rows)
 
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = ANY('{a,d}'); ROLLBACK;
                                      QUERY PLAN                                      
 ------------------------------------------------------------------------------------
  Custom Scan (HypertableModify) (actual rows=0 loops=1)
-   Batches decompressed: 1
-   Tuples decompressed: 1
+   Batches deleted: 1
    ->  Delete on test_pushdown (actual rows=0 loops=1)
          Delete on _hyper_39_79_chunk test_pushdown_1
-         ->  Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1)
+         ->  Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1)
                Filter: (device = ANY ('{a,d}'::text[]))
-(7 rows)
+(6 rows)
 
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device IN ('a',CURRENT_USER); ROLLBACK;
                                      QUERY PLAN                                      
diff --git a/tsl/test/expected/foreign_keys.out b/tsl/test/expected/foreign_keys.out
index cac98f3fee3..ebf0c1c4618 100644
--- a/tsl/test/expected/foreign_keys.out
+++ b/tsl/test/expected/foreign_keys.out
@@ -761,7 +761,8 @@ SELECT * FROM ht;
 (1 row)
 
 ROLLBACK;
--- ON DELETE CASCADE with compression
+-- ON DELETE CASCADE with compression without direct batch delete
+SET timescaledb.enable_compressed_direct_batch_delete TO false;
 BEGIN;
 INSERT INTO fk_cascade(fk_cascade) VALUES ('fk_cascade');
 INSERT INTO ht(time, fk_cascade) VALUES ('2020-01-01', 'fk_cascade');
@@ -788,6 +789,33 @@ EXPLAIN (analyze, costs off, timing off, summary off) SELECT * FROM ht;
    ->  Seq Scan on _hyper_13_14_chunk (actual rows=0 loops=1)
 (4 rows)
 
+ROLLBACK;
+RESET timescaledb.enable_compressed_direct_batch_delete;
+-- ON DELETE CASCADE with compression and direct batch delete
+BEGIN;
+INSERT INTO fk_cascade(fk_cascade) VALUES ('fk_cascade');
+INSERT INTO ht(time, fk_cascade) VALUES ('2020-01-01', 'fk_cascade');
+SELECT count(compress_chunk(ch)) FROM show_chunks('ht') ch;
+ count 
+-------
+     1
+(1 row)
+
+-- should cascade
+DELETE FROM fk_cascade;
+SELECT * FROM ht;
+             time             | fk_no_action | fk_restrict | fk_cascade | fk_set_null | fk_set_default 
+------------------------------+--------------+-------------+------------+-------------+----------------
+ Wed Jan 01 00:00:00 2020 PST |              |             |            |             | default
+(1 row)
+
+EXPLAIN (analyze, costs off, timing off, summary off) SELECT * FROM ht;
+                                  QUERY PLAN                                   
+-----------------------------------------------------------------------------
+ Custom Scan (DecompressChunk) on _hyper_13_14_chunk (actual rows=1 loops=1)
+   ->  Seq Scan on compress_hyper_14_21_chunk (actual rows=1 loops=1)
+(2 rows)
+
 ROLLBACK;
 -- SET NULL
 -- should fail with foreign key violation
@@ -831,7 +859,7 @@ EXPLAIN (analyze, costs off, timing off, summary off) SELECT * FROM ht;
 -----------------------------------------------------------------------------------
  Append (actual rows=2 loops=1)
    ->  Custom Scan (DecompressChunk) on _hyper_13_14_chunk (actual rows=1 loops=1)
-         ->  Seq Scan on compress_hyper_14_21_chunk (actual rows=1 loops=1)
+         ->  Seq Scan on compress_hyper_14_22_chunk (actual rows=1 loops=1)
    ->  Seq Scan on _hyper_13_14_chunk (actual rows=1 loops=1)
 (4 rows)
 
@@ -874,7 +902,7 @@ EXPLAIN (analyze, costs off, timing off, summary off) SELECT * FROM ht;
 -----------------------------------------------------------------------------------
  Append (actual rows=2 loops=1)
    ->  Custom Scan (DecompressChunk) on _hyper_13_14_chunk (actual rows=1 loops=1)
-         ->  Seq Scan on compress_hyper_14_22_chunk (actual rows=1 loops=1)
+         ->  Seq Scan on compress_hyper_14_23_chunk (actual rows=1 loops=1)
    ->  Seq Scan on _hyper_13_14_chunk (actual rows=1 loops=1)
 (4 rows)
 
@@ -933,7 +961,7 @@ EXPLAIN (analyze, costs off, timing off, summary off) SELECT * FROM ht;
 -----------------------------------------------------------------------------------
  Append (actual rows=2 loops=1)
    ->  Custom Scan (DecompressChunk) on _hyper_13_14_chunk (actual rows=1 loops=1)
-         ->  Seq Scan on compress_hyper_14_23_chunk (actual rows=1 loops=1)
+         ->  Seq Scan on compress_hyper_14_24_chunk (actual rows=1 loops=1)
    ->  Seq Scan on _hyper_13_14_chunk (actual rows=1 loops=1)
 (4 rows)
 
@@ -981,7 +1009,7 @@ EXPLAIN (analyze, costs off, timing off, summary off) SELECT * FROM ht;
 -----------------------------------------------------------------------------------
  Append (actual rows=2 loops=1)
    ->  Custom Scan (DecompressChunk) on _hyper_13_14_chunk (actual rows=1 loops=1)
-         ->  Seq Scan on compress_hyper_14_24_chunk (actual rows=1 loops=1)
+         ->  Seq Scan on compress_hyper_14_25_chunk (actual rows=1 loops=1)
    ->  Seq Scan on _hyper_13_14_chunk (actual rows=1 loops=1)
 (4 rows)
 
diff --git a/tsl/test/shared/expected/compression_dml.out b/tsl/test/shared/expected/compression_dml.out
index 72e600a4b56..38cb54edd5a 100644
--- a/tsl/test/shared/expected/compression_dml.out
+++ b/tsl/test/shared/expected/compression_dml.out
@@ -415,3 +415,170 @@ QUERY PLAN
 
 RESET timescaledb.enable_dml_decompression_tuple_filtering;
 DROP TABLE lazy_decompress;
+CREATE FUNCTION trigger_function() RETURNS TRIGGER LANGUAGE PLPGSQL
+AS $$
+BEGIN
+    RAISE WARNING 'Trigger fired';
+    RETURN NEW;
+END;
+$$;
+-- test direct delete on compressed hypertable
+CREATE TABLE direct_delete(time timestamptz not null, device text, reading text, value float);
+SELECT table_name FROM create_hypertable('direct_delete', 'time');
+ table_name 
+ direct_delete
+(1 row)
+
+ALTER TABLE direct_delete SET (timescaledb.compress, timescaledb.compress_segmentby = 'device, reading');
+NOTICE:  default order by for hypertable "direct_delete" is set to ""time" DESC"
+INSERT INTO direct_delete VALUES
+('2021-01-01', 'd1', 'r1', 1.0),
+('2021-01-01', 'd1', 'r2', 1.0),
+('2021-01-01', 'd1', 'r3', 1.0),
+('2021-01-01', 'd2', 'r1', 1.0),
+('2021-01-01', 'd2', 'r2', 1.0),
+('2021-01-01', 'd2', 'r3', 1.0);
+SELECT count(compress_chunk(c)) FROM show_chunks('direct_delete') c;
+ count 
+     1
+(1 row)
+
+BEGIN;
+-- should be 3 batches directly deleted
+:ANALYZE DELETE FROM direct_delete WHERE device='d1';
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches deleted: 3
+   ->  Delete on direct_delete (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk direct_delete_1
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=0 loops=1)
+               Filter: (device = 'd1'::text)
+(6 rows)
+
+-- double check it's actually deleted
+SELECT count(*) FROM direct_delete WHERE device='d1';
+ count 
+     0
+(1 row)
+
+ROLLBACK;
+BEGIN;
+-- should be 2 batches directly deleted
+:ANALYZE DELETE FROM direct_delete WHERE reading='r2';
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches deleted: 2
+   ->  Delete on direct_delete (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk direct_delete_1
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=0 loops=1)
+               Filter: (reading = 'r2'::text)
+(6 rows)
+
+-- double check it's actually deleted
+SELECT count(*) FROM direct_delete WHERE reading='r2';
+ count 
+     0
+(1 row)
+
+ROLLBACK;
+-- combining constraints on segmentby columns should work
+BEGIN;
+-- should be 1 batch directly deleted
+:ANALYZE DELETE FROM direct_delete WHERE device='d1' AND reading='r2';
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches deleted: 1
+   ->  Delete on direct_delete (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk direct_delete_1
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=0 loops=1)
+               Filter: ((device = 'd1'::text) AND (reading = 'r2'::text))
+(6 rows)
+
+-- double check it's actually deleted
+SELECT count(*) FROM direct_delete WHERE device='d1' AND reading='r2';
+ count 
+     0
+(1 row)
+
+ROLLBACK;
+-- constraints involving non-segmentby columns should not directly delete
+BEGIN; :ANALYZE DELETE FROM direct_delete WHERE value = '1.0'; ROLLBACK;
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches decompressed: 6
+   Tuples decompressed: 6
+   ->  Delete on direct_delete (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk direct_delete_1
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=6 loops=1)
+               Filter: (value = '1'::double precision)
+(7 rows)
+
+BEGIN; :ANALYZE DELETE FROM direct_delete WHERE device = 'd1' AND value = '1.0'; ROLLBACK;
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches decompressed: 3
+   Tuples decompressed: 3
+   ->  Delete on direct_delete (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk direct_delete_1
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=3 loops=1)
+               Filter: ((device = 'd1'::text) AND (value = '1'::double precision))
+(7 rows)
+
+BEGIN; :ANALYZE DELETE FROM direct_delete WHERE reading = 'r1' AND value = '1.0'; ROLLBACK;
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches decompressed: 2
+   Tuples decompressed: 2
+   ->  Delete on direct_delete (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk direct_delete_1
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=2 loops=1)
+               Filter: ((reading = 'r1'::text) AND (value = '1'::double precision))
+(7 rows)
+
+BEGIN; :ANALYZE DELETE FROM direct_delete WHERE device = 'd2' AND reading = 'r3' AND value = '1.0'; ROLLBACK;
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches decompressed: 1
+   Tuples decompressed: 1
+   ->  Delete on direct_delete (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk direct_delete_1
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=1 loops=1)
+               Filter: ((device = 'd2'::text) AND (reading = 'r3'::text) AND (value = '1'::double precision))
+(7 rows)
+
+-- presence of trigger should prevent direct delete
+CREATE TRIGGER direct_delete_trigger BEFORE DELETE ON direct_delete FOR EACH ROW EXECUTE FUNCTION trigger_function();
+BEGIN; :ANALYZE DELETE FROM direct_delete WHERE device = 'd1'; ROLLBACK;
+WARNING:  Trigger fired
+WARNING:  Trigger fired
+WARNING:  Trigger fired
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches decompressed: 3
+   Tuples decompressed: 3
+   ->  Delete on direct_delete (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk direct_delete_1
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=3 loops=1)
+               Filter: (device = 'd1'::text)
+ Trigger direct_delete_trigger on _hyper_X_X_chunk: calls=3
+(8 rows)
+
+DROP TRIGGER direct_delete_trigger ON direct_delete;
+CREATE TRIGGER direct_delete_trigger AFTER DELETE ON direct_delete FOR EACH ROW EXECUTE FUNCTION trigger_function();
+BEGIN; :ANALYZE DELETE FROM direct_delete WHERE device = 'd1'; ROLLBACK;
+WARNING:  Trigger fired
+WARNING:  Trigger fired
+WARNING:  Trigger fired
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches decompressed: 3
+   Tuples decompressed: 3
+   ->  Delete on direct_delete (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk direct_delete_1
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=3 loops=1)
+               Filter: (device = 'd1'::text)
+ Trigger direct_delete_trigger on _hyper_X_X_chunk: calls=3
+(8 rows)
+
+DROP TRIGGER direct_delete_trigger ON direct_delete;
+DROP TABLE direct_delete;
diff --git a/tsl/test/shared/expected/decompress_tracking.out b/tsl/test/shared/expected/decompress_tracking.out
index 1fdf732117b..9ee349fce9a 100644
--- a/tsl/test/shared/expected/decompress_tracking.out
+++ b/tsl/test/shared/expected/decompress_tracking.out
@@ -63,6 +63,7 @@ QUERY PLAN
                Filter: (device = 'd2'::text)
 (12 rows)
 
+SET timescaledb.enable_compressed_direct_batch_delete TO false;
 BEGIN; :EXPLAIN_ANALYZE DELETE FROM decompress_tracking; ROLLBACK;
 QUERY PLAN
  Custom Scan (HypertableModify) (actual rows=0 loops=1)
@@ -91,6 +92,33 @@ QUERY PLAN
                Filter: (device = 'd3'::text)
 (11 rows)
 
+RESET timescaledb.enable_compressed_direct_batch_delete;
+BEGIN; :EXPLAIN_ANALYZE DELETE FROM decompress_tracking; ROLLBACK;
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches deleted: 5
+   ->  Delete on decompress_tracking (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk decompress_tracking_1
+         Delete on _hyper_X_X_chunk decompress_tracking_2
+         ->  Append (actual rows=0 loops=1)
+               ->  Seq Scan on _hyper_X_X_chunk decompress_tracking_1 (actual rows=0 loops=1)
+               ->  Seq Scan on _hyper_X_X_chunk decompress_tracking_2 (actual rows=0 loops=1)
+(8 rows)
+
+BEGIN; :EXPLAIN_ANALYZE DELETE FROM decompress_tracking WHERE device = 'd3'; ROLLBACK;
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches deleted: 2
+   ->  Delete on decompress_tracking (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk decompress_tracking_1
+         Delete on _hyper_X_X_chunk decompress_tracking_2
+         ->  Append (actual rows=0 loops=1)
+               ->  Seq Scan on _hyper_X_X_chunk decompress_tracking_1 (actual rows=0 loops=1)
+                     Filter: (device = 'd3'::text)
+               ->  Seq Scan on _hyper_X_X_chunk decompress_tracking_2 (actual rows=0 loops=1)
+                     Filter: (device = 'd3'::text)
+(10 rows)
+
 BEGIN; :EXPLAIN_ANALYZE INSERT INTO decompress_tracking SELECT '2020-01-01 1:30','d1',random(); ROLLBACK;
 QUERY PLAN
  Custom Scan (HypertableModify) (actual rows=0 loops=1)
diff --git a/tsl/test/shared/sql/compression_dml.sql b/tsl/test/shared/sql/compression_dml.sql
index 51755a46d90..098e4400e7d 100644
--- a/tsl/test/shared/sql/compression_dml.sql
+++ b/tsl/test/shared/sql/compression_dml.sql
@@ -188,3 +188,66 @@
 RESET timescaledb.enable_dml_decompression_tuple_filtering;
 
 DROP TABLE lazy_decompress;
+
+CREATE FUNCTION trigger_function() RETURNS TRIGGER LANGUAGE PLPGSQL
+AS $$
+BEGIN
+    RAISE WARNING 'Trigger fired';
+    RETURN NEW;
+END;
+$$;
+
+-- test direct delete on compressed hypertable
+CREATE TABLE direct_delete(time timestamptz not null, device text, reading text, value float);
+SELECT table_name FROM create_hypertable('direct_delete', 'time');
+
+ALTER TABLE direct_delete SET (timescaledb.compress, timescaledb.compress_segmentby = 'device, reading');
+INSERT INTO direct_delete VALUES
+('2021-01-01', 'd1', 'r1', 1.0),
+('2021-01-01', 'd1', 'r2', 1.0),
+('2021-01-01', 'd1', 'r3', 1.0),
+('2021-01-01', 'd2', 'r1', 1.0),
+('2021-01-01', 'd2', 'r2', 1.0),
+('2021-01-01', 'd2', 'r3', 1.0);
+
+SELECT count(compress_chunk(c)) FROM show_chunks('direct_delete') c;
+
+BEGIN;
+-- should be 3 batches directly deleted
+:ANALYZE DELETE FROM direct_delete WHERE device='d1';
+-- double check it's actually deleted
+SELECT count(*) FROM direct_delete WHERE device='d1';
+ROLLBACK;
+
+BEGIN;
+-- should be 2 batches directly deleted
+:ANALYZE DELETE FROM direct_delete WHERE reading='r2';
+-- double check it's actually deleted
+SELECT count(*) FROM direct_delete WHERE reading='r2';
+ROLLBACK;
+
+-- combining constraints on segmentby columns should work
+BEGIN;
+-- should be 1 batch directly deleted
+:ANALYZE DELETE FROM direct_delete WHERE device='d1' AND reading='r2';
+-- double check it's actually deleted
+SELECT count(*) FROM direct_delete WHERE device='d1' AND reading='r2';
+ROLLBACK;
+
+-- constraints involving non-segmentby columns should not directly delete
+BEGIN; :ANALYZE DELETE FROM direct_delete WHERE value = '1.0'; ROLLBACK;
+BEGIN; :ANALYZE DELETE FROM direct_delete WHERE device = 'd1' AND value = '1.0'; ROLLBACK;
+BEGIN; :ANALYZE DELETE FROM direct_delete WHERE reading = 'r1' AND value = '1.0'; ROLLBACK;
+BEGIN; :ANALYZE DELETE FROM direct_delete WHERE device = 'd2' AND reading = 'r3' AND value = '1.0'; ROLLBACK;
+
+-- presence of trigger should prevent direct delete
+CREATE TRIGGER direct_delete_trigger BEFORE DELETE ON direct_delete FOR EACH ROW EXECUTE FUNCTION trigger_function();
+BEGIN; :ANALYZE DELETE FROM direct_delete WHERE device = 'd1'; ROLLBACK;
+DROP TRIGGER direct_delete_trigger ON direct_delete;
+
+CREATE TRIGGER direct_delete_trigger AFTER DELETE ON direct_delete FOR EACH ROW EXECUTE FUNCTION trigger_function();
+BEGIN; :ANALYZE DELETE FROM direct_delete WHERE device = 'd1'; ROLLBACK;
+DROP TRIGGER direct_delete_trigger ON direct_delete;
+
+
+DROP TABLE direct_delete;
+
diff --git a/tsl/test/shared/sql/decompress_tracking.sql b/tsl/test/shared/sql/decompress_tracking.sql
index 55fd23d7d95..aa439282014 100644
--- a/tsl/test/shared/sql/decompress_tracking.sql
+++ b/tsl/test/shared/sql/decompress_tracking.sql
@@ -22,8 +22,15 @@ ANALYZE decompress_tracking;
 
 BEGIN; :EXPLAIN_ANALYZE UPDATE decompress_tracking SET value = value + 3; ROLLBACK;
 BEGIN; :EXPLAIN_ANALYZE UPDATE decompress_tracking SET value = value + 3 WHERE device = 'd2'; ROLLBACK;
+
+SET timescaledb.enable_compressed_direct_batch_delete TO false;
 BEGIN; :EXPLAIN_ANALYZE DELETE FROM decompress_tracking; ROLLBACK;
 BEGIN; :EXPLAIN_ANALYZE DELETE FROM decompress_tracking WHERE device = 'd3'; ROLLBACK;
+RESET timescaledb.enable_compressed_direct_batch_delete;
+
+BEGIN; :EXPLAIN_ANALYZE DELETE FROM decompress_tracking; ROLLBACK;
+BEGIN; :EXPLAIN_ANALYZE DELETE FROM decompress_tracking WHERE device = 'd3'; ROLLBACK;
+
 BEGIN; :EXPLAIN_ANALYZE INSERT INTO decompress_tracking SELECT '2020-01-01 1:30','d1',random(); ROLLBACK;
 BEGIN; :EXPLAIN_ANALYZE INSERT INTO decompress_tracking SELECT '2020-01-01','d2',random(); ROLLBACK;
 BEGIN; :EXPLAIN_ANALYZE INSERT INTO decompress_tracking SELECT '2020-01-01','d4',random(); ROLLBACK;
diff --git a/tsl/test/sql/compression_update_delete.sql b/tsl/test/sql/compression_update_delete.sql
index 25cc67b7904..afb29e4e924 100644
--- a/tsl/test/sql/compression_update_delete.sql
+++ b/tsl/test/sql/compression_update_delete.sql
@@ -1504,11 +1504,16 @@ INSERT INTO test_pushdown SELECT '2020-01-01 05:00', 'c';
 
 CREATE TABLE devices(device text);
 INSERT INTO devices VALUES ('a'), ('b'), ('c');
+CREATE TABLE devices2(device text);
+INSERT INTO devices2 VALUES ('d'), ('e'), ('f');
+CREATE TABLE devices3(device text);
+INSERT INTO devices3 VALUES ('b'), ('d'), ('g');
 ALTER TABLE test_pushdown SET (timescaledb.compress, timescaledb.compress_segmentby='device');
 SELECT compress_chunk(show_chunks('test_pushdown'));
 
 -- 3 batch decompressions means pushdown is not working so we expect less than 3 for all these queries
+SET timescaledb.enable_compressed_direct_batch_delete TO false;
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'a' = device; ROLLBACK;
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device < 'c' ; ROLLBACK;
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'c' > device; ROLLBACK;
@@ -1517,6 +1522,7 @@ BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device > 'b'; ROLLBACK;
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = CURRENT_USER; ROLLBACK;
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'b' < device; ROLLBACK;
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'b' <= device; ROLLBACK;
+RESET timescaledb.enable_compressed_direct_batch_delete;
 
 -- cant pushdown OR atm
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = 'a' OR device = 'b'; ROLLBACK;
@@ -1525,13 +1531,26 @@ BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = 'a' OR device = 'b'; RO
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE time = timestamptz('2020-01-01 05:00'); ROLLBACK;
 -- test sqlvaluefunction
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = substring(CURRENT_USER,length(CURRENT_USER)+1) || 'c'; ROLLBACK;
+
+-- JOIN tests
 -- no filtering in decompression
-BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.device; ROLLBACK;
+SET timescaledb.enable_compressed_direct_batch_delete TO false;
+BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices3 d WHERE p.device=d.device; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK;
+RESET timescaledb.enable_compressed_direct_batch_delete;
+BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices3 d WHERE p.device=d.device; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK;
 
 -- can filter in decompression even before executing join
-BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.device AND d.device ='b' ; ROLLBACK;
+SET timescaledb.enable_compressed_direct_batch_delete TO false;
+BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.device AND d.device ='b'; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK;
+RESET timescaledb.enable_compressed_direct_batch_delete;
+BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.device AND d.device ='b'; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK;
+
 -- test prepared statement
 PREPARE q1(text) AS DELETE FROM test_pushdown WHERE device = $1;
+SET timescaledb.enable_compressed_direct_batch_delete TO false;
+BEGIN; :EXPLAIN EXECUTE q1('a'); ROLLBACK;
+RESET timescaledb.enable_compressed_direct_batch_delete;
 BEGIN; :EXPLAIN EXECUTE q1('a'); ROLLBACK;
+BEGIN; :EXPLAIN EXECUTE q1('not here'); ROLLBACK;
 
 -- test arrayop pushdown less than 3 decompressions are expected for successful pushdown
 BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device IN ('a','d'); ROLLBACK;
diff --git a/tsl/test/sql/foreign_keys.sql b/tsl/test/sql/foreign_keys.sql
index 3ba6000aa3b..c8af332f7b8 100644
--- a/tsl/test/sql/foreign_keys.sql
+++ b/tsl/test/sql/foreign_keys.sql
@@ -531,7 +531,20 @@ DELETE FROM fk_cascade;
 SELECT * FROM ht;
 ROLLBACK;
 
--- ON DELETE CASCADE with compression
+-- ON DELETE CASCADE with compression without direct batch delete
+SET timescaledb.enable_compressed_direct_batch_delete TO false;
+BEGIN;
+INSERT INTO fk_cascade(fk_cascade) VALUES ('fk_cascade');
+INSERT INTO ht(time, fk_cascade) VALUES ('2020-01-01', 'fk_cascade');
+SELECT count(compress_chunk(ch)) FROM show_chunks('ht') ch;
+-- should cascade
+DELETE FROM fk_cascade;
+SELECT * FROM ht;
+EXPLAIN (analyze, costs off, timing off, summary off) SELECT * FROM ht;
+ROLLBACK;
+RESET timescaledb.enable_compressed_direct_batch_delete;
+
+-- ON DELETE CASCADE with compression and direct batch delete
 BEGIN;
 INSERT INTO fk_cascade(fk_cascade) VALUES ('fk_cascade');
 INSERT INTO ht(time, fk_cascade) VALUES ('2020-01-01', 'fk_cascade');
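
Below is a minimal usage sketch of the behavior this patch introduces. It is not part of the patch; the hypertable name `metrics` and its columns are illustrative only. With `timescaledb.enable_compressed_direct_batch_delete` enabled (the default), a DELETE whose WHERE clause only compares segmentby columns against constants is executed by dropping whole compressed batches, and EXPLAIN reports the new "Batches deleted" counter instead of "Batches decompressed"/"Tuples decompressed":

-- Usage sketch (illustrative schema, assuming a build with this patch applied):
CREATE TABLE metrics(time timestamptz NOT NULL, device text, value float);
SELECT create_hypertable('metrics', 'time');
ALTER TABLE metrics SET (timescaledb.compress, timescaledb.compress_segmentby = 'device');
INSERT INTO metrics VALUES ('2024-01-01', 'd1', 1.0), ('2024-01-01', 'd2', 2.0);
SELECT compress_chunk(show_chunks('metrics'));

-- Predicate covers only the segmentby column: the whole batch is deleted
-- without decompression and the plan shows "Batches deleted: 1".
BEGIN;
EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF)
DELETE FROM metrics WHERE device = 'd1';
ROLLBACK;

-- Predicate touches a non-segmentby column: batches are still decompressed
-- first, so the plan shows "Batches decompressed"/"Tuples decompressed".
BEGIN;
EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF)
DELETE FROM metrics WHERE value = 1.0;
ROLLBACK;

-- The optimization can be switched off per session.
SET timescaledb.enable_compressed_direct_batch_delete TO false;

As the tests above exercise, the fast path is also skipped when the DELETE has a RETURNING clause, when DELETE row triggers exist on the hypertable, or when the plan contains joins, since all of those need the individual decompressed rows.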