Merge branch 'main' into fix_memory_handling_branch
Medvecrab authored Feb 15, 2023
2 parents 9cec610 + 0963609 commit 88680a0
Showing 11 changed files with 93 additions and 26 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/apt-arm-packages.yaml
@@ -17,8 +17,8 @@ jobs:
fail-fast: false
matrix:
# Debian images: 10 (buster), 11 (bullseye)
# Ubuntu images: 20.04 LTS (focal), 22.04 (jammy)
image: [ "debian:10-slim","debian:11-slim","ubuntu:focal", "ubuntu:jammy"]
# Ubuntu images: 20.04 LTS (focal), 22.04 (jammy), 22.10 (kinetic)
image: [ "debian:10-slim","debian:11-slim","ubuntu:focal", "ubuntu:jammy", "ubuntu:kinetic"]
pg: [ 12, 13, 14, 15 ]

steps:
4 changes: 2 additions & 2 deletions .github/workflows/apt-packages.yaml
@@ -21,8 +21,8 @@ jobs:
fail-fast: false
matrix:
# Debian images: 10 (buster), 11 (bullseye)
# Ubuntu images: 18.04 LTS (bionic), 20.04 LTS (focal), 21.10 (impish), 22.04 (jammy)
image: [ "debian:10-slim", "debian:11-slim", "ubuntu:focal", "ubuntu:jammy"]
# Ubuntu images: 18.04 LTS (bionic), 20.04 LTS (focal), 21.10 (impish), 22.04 (jammy), 22.10 (kinetic)
image: [ "debian:10-slim", "debian:11-slim", "ubuntu:focal", "ubuntu:jammy", "ubuntu:kinetic"]
pg: [ 12, 13, 14, 15 ]
license: [ "TSL", "Apache"]
include:
10 changes: 9 additions & 1 deletion coccinelle/ereport_pg12.cocci
@@ -11,6 +11,14 @@ expression E1, E2;

// We pass two or more expressions to ereport

+ /* ereport uses PG 12.3+ syntax */
+ /*
+ * Please enclose the auxiliary ereport arguments in parentheses for
+ * compatibility with PG 12. Example:
+ *
+ * ereport(ERROR, ( errmsg(...), errdetail(...) ) );
+ * ^-----------add these---------^
+ *
+ * See https://github.com/postgres/postgres/commit/a86715451653c730d637847b403b0420923956f7
+ */
ereport(K1, E1, E2, ...);
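
For readers new to this rule, here is a minimal sketch of a call written in the PG 12-compatible form the comment above describes; the error code, message, and hint are hypothetical and not taken from this commit:

/* Hypothetical ereport() call: the extra parentheses around the auxiliary
 * arguments keep it compilable against PostgreSQL 12 as well as 13+. */
ereport(ERROR,
        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
         errmsg("cache size must be a non-negative integer"),
         errhint("Set timescaledb.max_cached_chunks_per_hypertable to 0 or a positive value.")));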

69 changes: 53 additions & 16 deletions src/guc.c
@@ -80,8 +80,8 @@ bool ts_guc_enable_parameterized_data_node_scan = true;
bool ts_guc_enable_async_append = true;
TSDLLEXPORT bool ts_guc_enable_compression_indexscan = true;
TSDLLEXPORT bool ts_guc_enable_skip_scan = true;
int ts_guc_max_open_chunks_per_insert = 10;
int ts_guc_max_cached_chunks_per_hypertable = 10;
int ts_guc_max_open_chunks_per_insert; /* default is computed at runtime */
int ts_guc_max_cached_chunks_per_hypertable = 100;
#ifdef USE_TELEMETRY
TelemetryLevel ts_guc_telemetry_level = TELEMETRY_DEFAULT;
char *ts_telemetry_cloud = NULL;
@@ -90,10 +90,10 @@ char *ts_telemetry_cloud = NULL;
TSDLLEXPORT char *ts_guc_license = TS_LICENSE_DEFAULT;
char *ts_last_tune_time = NULL;
char *ts_last_tune_version = NULL;
TSDLLEXPORT bool ts_guc_enable_2pc;
TSDLLEXPORT bool ts_guc_enable_2pc = true;
TSDLLEXPORT int ts_guc_max_insert_batch_size = 1000;
TSDLLEXPORT bool ts_guc_enable_connection_binary_data;
TSDLLEXPORT DistCopyTransferFormat ts_guc_dist_copy_transfer_format;
TSDLLEXPORT bool ts_guc_enable_connection_binary_data = true;
TSDLLEXPORT DistCopyTransferFormat ts_guc_dist_copy_transfer_format = DCTF_Auto;
TSDLLEXPORT bool ts_guc_enable_client_ddl_on_data_nodes = false;
TSDLLEXPORT char *ts_guc_ssl_dir = NULL;
TSDLLEXPORT char *ts_guc_passfile = NULL;
@@ -104,9 +104,15 @@ TSDLLEXPORT int ts_guc_hypertable_replication_factor_default = 1;

#ifdef TS_DEBUG
bool ts_shutdown_bgw = false;
char *ts_current_timestamp_mock = "";
char *ts_current_timestamp_mock = NULL;
#endif

/*
* We need to know whether we have finished initializing the GUCs, so that we
* know when it's OK to check their values for mutual consistency.
*/
static bool gucs_are_initialized = false;

/* Hook for plugins to allow additional SSL options */
set_ssl_options_hook_type ts_set_ssl_options_hook = NULL;

@@ -117,11 +117,44 @@ ts_assign_ssl_options_hook(void *fn)
ts_set_ssl_options_hook = (set_ssl_options_hook_type) fn;
}

/*
* Warn about the mismatched cache sizes that can lead to cache thrashing.
*/
static void
validate_chunk_cache_sizes(int hypertable_chunks, int insert_chunks)
{
/*
* Note that this callback is also called when the individual GUCs are
* initialized, so we are going to see temporary mismatched values here.
* That's why we also have to check that the GUC initialization has
* finished.
*/
if (gucs_are_initialized && insert_chunks > hypertable_chunks)
{
ereport(WARNING,
(errmsg("insert cache size is larger than hypertable chunk cache size"),
errdetail("insert cache size is %d, hypertable chunk cache size is %d",
insert_chunks,
hypertable_chunks),
errhint("This is a configuration problem. Either increase "
"timescaledb.max_cached_chunks_per_hypertable (preferred) or decrease "
"timescaledb.max_open_chunks_per_insert.")));
}
}

static void
assign_max_cached_chunks_per_hypertable_hook(int newval, void *extra)
{
/* invalidate the hypertable cache to reset */
ts_hypertable_cache_invalidate_callback();

validate_chunk_cache_sizes(newval, ts_guc_max_open_chunks_per_insert);
}

static void
assign_max_open_chunks_per_insert_hook(int newval, void *extra)
{
validate_chunk_cache_sizes(ts_guc_max_cached_chunks_per_hypertable, newval);
}

void
@@ -453,27 +492,20 @@ _guc_init(void)
"Maximum open chunks per insert",
"Maximum number of open chunk tables per insert",
&ts_guc_max_open_chunks_per_insert,
Min(work_mem * INT64CONST(1024) / INT64CONST(25000),
PG_INT16_MAX), /* Measurements via
* `MemoryContextStats(TopMemoryContext)`
* show chunk insert
* state memory context
* takes up ~25K bytes
* (work_mem is in
* kbytes) */
1024,
0,
PG_INT16_MAX,
PGC_USERSET,
0,
NULL,
NULL,
assign_max_open_chunks_per_insert_hook,
NULL);

DefineCustomIntVariable("timescaledb.max_cached_chunks_per_hypertable",
"Maximum cached chunks",
"Maximum number of chunks stored in the cache",
&ts_guc_max_cached_chunks_per_hypertable,
100,
1024,
0,
65536,
PGC_USERSET,
@@ -591,6 +623,11 @@ _guc_init(void)
NULL,
NULL,
NULL);

gucs_are_initialized = true;

validate_chunk_cache_sizes(ts_guc_max_cached_chunks_per_hypertable,
ts_guc_max_open_chunks_per_insert);
}

void
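
The interaction between the assign hooks and the gucs_are_initialized flag added above is easier to see in isolation. The following is a condensed, hypothetical sketch of the pattern (the my_-prefixed names are made up, only one of the two GUCs is defined, and this is not the actual TimescaleDB code):

#include "postgres.h"
#include "utils/guc.h"

static int my_open_chunks_per_insert;
static int my_cached_chunks_per_hypertable = 1024;
static bool my_gucs_initialized = false;

/*
 * Warn only once both GUCs have real values: while DefineCustomIntVariable()
 * runs, the assign hook already fires although the other variable may still
 * hold its compile-time default, which is why the flag is needed.
 */
static void
my_validate_chunk_cache_sizes(int hypertable_chunks, int insert_chunks)
{
    if (my_gucs_initialized && insert_chunks > hypertable_chunks)
        ereport(WARNING,
                (errmsg("insert cache size is larger than hypertable chunk cache size")));
}

static void
my_open_chunks_assign_hook(int newval, void *extra)
{
    my_validate_chunk_cache_sizes(my_cached_chunks_per_hypertable, newval);
}

void
my_guc_init(void)
{
    DefineCustomIntVariable("my_extension.max_open_chunks_per_insert",
                            "Maximum open chunks per insert",
                            NULL,
                            &my_open_chunks_per_insert,
                            1024,          /* boot value */
                            0,             /* min */
                            PG_INT16_MAX,  /* max */
                            PGC_USERSET,
                            0,
                            NULL,                        /* check hook */
                            my_open_chunks_assign_hook,  /* assign hook */
                            NULL);                       /* show hook */
    /* ... the second GUC would be defined the same way ... */

    my_gucs_initialized = true;
    my_validate_chunk_cache_sizes(my_cached_chunks_per_hypertable,
                                  my_open_chunks_per_insert);
}

The warning text matches what the updated expected files (insert_single.out, dist_copy_available_dns.out, dist_copy_long.out) now anticipate.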
2 changes: 1 addition & 1 deletion src/ts_catalog/metadata.c
@@ -36,7 +36,7 @@ convert_type_to_text(Datum value, Oid from_type)
if (!OidIsValid(outfunc))
TYPE_ERROR("output", from_type);

return DirectFunctionCall1(textin, CStringGetDatum(OidFunctionCall1(outfunc, value)));
return DirectFunctionCall1(textin, OidFunctionCall1(outfunc, value));
}

static Datum
2 changes: 1 addition & 1 deletion src/utils.c
@@ -879,7 +879,7 @@ ts_sub_integer_from_now(int64 interval, Oid time_dim_type, Oid now_func)
Datum now;
int64 res;

AssertArg(IS_INTEGER_TYPE(time_dim_type));
Assert(IS_INTEGER_TYPE(time_dim_type));

now = OidFunctionCall0(now_func);
switch (time_dim_type)
16 changes: 14 additions & 2 deletions test/expected/insert_single.out
@@ -479,6 +479,15 @@ INSERT INTO "nondefault_mem_settings" VALUES
('2001-03-20T09:00:00', 30.6),
('2002-03-20T09:00:00', 31.9),
('2003-03-20T09:00:00', 32.9);
--warning about mismatched cache sizes
SET timescaledb.max_open_chunks_per_insert = 100;
WARNING: insert cache size is larger than hypertable chunk cache size
SET timescaledb.max_cached_chunks_per_hypertable = 10;
WARNING: insert cache size is larger than hypertable chunk cache size
INSERT INTO "nondefault_mem_settings" VALUES
('2001-05-20T09:00:00', 36.6),
('2002-05-20T09:00:00', 37.9),
('2003-05-20T09:00:00', 38.9);
--unlimited
SET timescaledb.max_open_chunks_per_insert = 0;
SET timescaledb.max_cached_chunks_per_hypertable = 0;
@@ -497,10 +506,13 @@ SELECT * FROM "nondefault_mem_settings";
Tue Mar 20 09:00:00 2001 | 30.6
Wed Mar 20 09:00:00 2002 | 31.9
Thu Mar 20 09:00:00 2003 | 32.9
Sun May 20 09:00:00 2001 | 36.6
Mon May 20 09:00:00 2002 | 37.9
Tue May 20 09:00:00 2003 | 38.9
Fri Apr 20 09:00:00 2001 | 33.6
Sat Apr 20 09:00:00 2002 | 34.9
Sun Apr 20 09:00:00 2003 | 35.9
(11 rows)
(14 rows)

--test rollback
BEGIN;
@@ -527,7 +539,7 @@ SAVEPOINT savepoint_2;
SAVEPOINT
\set ON_ERROR_STOP 0
INSERT INTO "data_records" ("time", "value") VALUES (3, -1);
ERROR: new row for relation "_hyper_13_34_chunk" violates check constraint "data_records_value_check"
ERROR: new row for relation "_hyper_13_37_chunk" violates check constraint "data_records_value_check"
\set ON_ERROR_STOP 1
ROLLBACK TO SAVEPOINT savepoint_2;
ROLLBACK
8 changes: 8 additions & 0 deletions test/sql/insert_single.sql
@@ -154,6 +154,14 @@ INSERT INTO "nondefault_mem_settings" VALUES
('2002-03-20T09:00:00', 31.9),
('2003-03-20T09:00:00', 32.9);

--warning about mismatched cache sizes
SET timescaledb.max_open_chunks_per_insert = 100;
SET timescaledb.max_cached_chunks_per_hypertable = 10;
INSERT INTO "nondefault_mem_settings" VALUES
('2001-05-20T09:00:00', 36.6),
('2002-05-20T09:00:00', 37.9),
('2003-05-20T09:00:00', 38.9);

--unlimited
SET timescaledb.max_open_chunks_per_insert = 0;
SET timescaledb.max_cached_chunks_per_hypertable = 0;
2 changes: 1 addition & 1 deletion tsl/src/data_node.c
@@ -1593,7 +1593,7 @@ Datum
data_node_alter(PG_FUNCTION_ARGS)
{
const char *node_name = PG_ARGISNULL(0) ? NULL : NameStr(*PG_GETARG_NAME(0));
const char *host = PG_ARGISNULL(1) ? NULL : TextDatumGetCString(PG_GETARG_TEXT_P(1));
const char *host = PG_ARGISNULL(1) ? NULL : TextDatumGetCString(PG_GETARG_DATUM(1));
const char *database = PG_ARGISNULL(2) ? NULL : NameStr(*PG_GETARG_NAME(2));
int port = PG_ARGISNULL(3) ? -1 : PG_GETARG_INT32(3);
bool available_is_null = PG_ARGISNULL(4);
1 change: 1 addition & 0 deletions tsl/test/expected/dist_copy_available_dns.out
@@ -111,6 +111,7 @@ SELECT * FROM chunk_query_data_node WHERE hypertable_name = 'uk_price_paid' LIMI
(5 rows)

set timescaledb.max_open_chunks_per_insert = 1117;
WARNING: insert cache size is larger than hypertable chunk cache size
SET ROLE :ROLE_CLUSTER_SUPERUSER;
SELECT * FROM alter_data_node('data_node_1', available=>true);
node_name | host | port | database | available
1 change: 1 addition & 0 deletions tsl/test/expected/dist_copy_long.out
@@ -116,6 +116,7 @@ select count(*) from uk_price_paid;

truncate uk_price_paid;
set timescaledb.max_open_chunks_per_insert = 1117;
WARNING: insert cache size is larger than hypertable chunk cache size
\copy uk_price_paid from program 'zcat < data/prices-100k-random-1.tsv.gz';
select count(*) from uk_price_paid;
count
