From dcb5db4e6004bc3d9c0b2ce325a1c1d7b74bd672 Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Thu, 8 Aug 2024 17:47:11 +0800 Subject: [PATCH 01/44] cdc: deprecate the enable-old-value to the previous v7.4 release-note (#18538) --- releases/release-7.4.0.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/releases/release-7.4.0.md b/releases/release-7.4.0.md index 96472dab8c6b5..525630df46aeb 100644 --- a/releases/release-7.4.0.md +++ b/releases/release-7.4.0.md @@ -321,10 +321,11 @@ Quick access: [Quick start](https://docs.pingcap.com/tidb/v7.4/quick-start-with- | TiCDC | [`large-message-handle-compression`](/ticdc/ticdc-sink-to-kafka.md#ticdc-data-compression) | Newly added | Controls whether to enable compression during encoding. The default value is empty, which means not enabled. | | TiCDC | [`large-message-handle-option`](/ticdc/ticdc-sink-to-kafka.md#send-large-messages-to-external-storage) | Modified | This configuration item adds a new value `claim-check`. When it is set to `claim-check`, TiCDC Kafka sink supports sending the message to external storage when the message size exceeds the limit and sends a message to Kafka containing the address of this large message in external storage. | -## Deprecated features +## Deprecated and removed features + [Mydumper](https://docs.pingcap.com/tidb/v4.0/mydumper-overview) will be deprecated in v7.5.0 and most of its features have been replaced by [Dumpling](/dumpling-overview.md). It is strongly recommended that you use Dumpling instead of mydumper. + TiKV-importer will be deprecated in v7.5.0. It is strongly recommended that you use the [Physical Import Mode of TiDB Lightning](/tidb-lightning/tidb-lightning-physical-import-mode.md) as an alternative. ++ The `enable-old-value` parameter of TiCDC is removed. [#9667](https://github.com/pingcap/tiflow/issues/9667) @[3AceShowHand](https://github.com/3AceShowHand) ## Improvements From 88684813dcc6df4f47feb9de218220c8698aea00 Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Fri, 9 Aug 2024 10:45:40 +0800 Subject: [PATCH 02/44] sys var: add more contents to `tidb_remove_orderby_in_subquery` (#18530) --- system-variables.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/system-variables.md b/system-variables.md index f52d7083c9c1a..27d4fec816dbc 100644 --- a/system-variables.md +++ b/system-variables.md @@ -4953,6 +4953,8 @@ SHOW WARNINGS; - Type: Boolean - Default value: Before v7.2.0, the default value is `OFF`. Starting from v7.2.0, the default value is `ON`. - Specifies whether to remove `ORDER BY` clause in a subquery. +- In the ISO/IEC SQL standard, `ORDER BY` is mainly used to sort the results of top-level queries. For subqueries, the standard does not require that the results be sorted by `ORDER BY`. +- To sort subquery results, you can usually handle it in the outer query, such as using the window function or using `ORDER BY` again in the outer query. Doing so ensures the order of the final result set. 
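+- For example, the following illustrative statements (written against a hypothetical table `t` with a column `a`) show how to apply the sort in the outer query so that the order of the final result set is guaranteed:
+
+    ```sql
+    -- The ORDER BY inside the subquery is not guaranteed to take effect:
+    SELECT * FROM (SELECT a FROM t ORDER BY a) AS sub;
+
+    -- Sort in the outer query instead, so the final result order is guaranteed:
+    SELECT a FROM (SELECT a FROM t) AS sub ORDER BY a;
+    ```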
### tidb_replica_read New in v4.0 From b23af507ee121adcb73cd32920b9e59ddbea08c8 Mon Sep 17 00:00:00 2001 From: Aolin Date: Fri, 9 Aug 2024 11:29:11 +0800 Subject: [PATCH 03/44] cdc: update output-raw-change-event comment link (#18522) --- ticdc/ticdc-changefeed-config.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ticdc/ticdc-changefeed-config.md b/ticdc/ticdc-changefeed-config.md index bda27765af597..d535f5c01ed26 100644 --- a/ticdc/ticdc-changefeed-config.md +++ b/ticdc/ticdc-changefeed-config.md @@ -291,7 +291,7 @@ sasl-oauth-grant-type = "client_credentials" # The audience in the Kafka SASL OAUTHBEARER authentication. The default value is empty. This parameter is optional when the OAUTHBEARER authentication is used. sasl-oauth-audience = "kafka" -# The following configuration item controls whether to output the original data change event. The default value is false, which means that for a non-MySQL sink when the primary key or the non-null unique key is changed in an `UPDATE` event, TiCDC splits the event into two events, `DELETE` and `INSERT`, and ensures that all events are sorted in the order in which the `DELETE` event precedes the `INSERT` event. Setting it to true means that the original event is output directly without splitting. +# The following configuration item controls whether to output the original data change event. The default value is false. For more information, see https://docs.pingcap.com/tidb/dev/ticdc-split-update-behavior#control-whether-to-split-primary-or-unique-key-update-events. # output-raw-change-event = false # The following configuration is only required when using Avro as the protocol and AWS Glue Schema Registry: @@ -344,7 +344,7 @@ batching-max-publish-delay=10 # The timeout for a Pulsar producer to send a message. The value is 30 seconds by default. send-timeout=30 -# The following configuration item controls whether to output the original data change event. The default value is false, which means that for a non-MySQL sink when the primary key or the non-null unique key is changed in an `UPDATE` event, TiCDC splits the event into two events, `DELETE` and `INSERT`, and ensures that all events are sorted in the order in which the `DELETE` event precedes the `INSERT` event. Setting it to true means that the original event is output directly without splitting. +# The following configuration item controls whether to output the original data change event. The default value is false. For more information, see https://docs.pingcap.com/tidb/dev/ticdc-split-update-behavior#control-whether-to-split-primary-or-unique-key-update-events. # output-raw-change-event = false [sink.cloud-storage-config] @@ -366,6 +366,6 @@ file-cleanup-cron-spec = "0 0 2 * * *" # The concurrency for uploading a single file. # The default value is 1, which means concurrency is disabled. flush-concurrency = 1 -# The following configuration item controls whether to output the original data change event. The default value is false, which means that for a non-MySQL sink when the primary key or the non-null unique key is changed in an `UPDATE` event, TiCDC splits the event into two events, `DELETE` and `INSERT`, and ensures that all events are sorted in the order in which the `DELETE` event precedes the `INSERT` event. Setting it to true means that the original event is output directly without splitting. +# The following configuration item controls whether to output the original data change event. The default value is false. 
For more information, see https://docs.pingcap.com/tidb/dev/ticdc-split-update-behavior#control-whether-to-split-primary-or-unique-key-update-events. output-raw-change-event = false ``` From 1cef35f1b8751b9ef9b47943d0a378c19ee3e84d Mon Sep 17 00:00:00 2001 From: Aolin Date: Tue, 13 Aug 2024 14:24:02 +0800 Subject: [PATCH 04/44] fix: use `""` to represent empty string (#18562) --- tiproxy/tiproxy-configuration.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tiproxy/tiproxy-configuration.md b/tiproxy/tiproxy-configuration.md index e1091c377765c..15497b45d0c97 100644 --- a/tiproxy/tiproxy-configuration.md +++ b/tiproxy/tiproxy-configuration.md @@ -85,10 +85,10 @@ Configuration for SQL port. #### `proxy-protocol` -+ Default value: `` ++ Default value: `""` + Support hot-reload: yes, but only for new connections -+ Possible values: ``, `v2` -+ Enable the [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) on the port. By enabling the PROXY protocol, TiProxy can pass the real client IP address to TiDB. `v2` indicates using the PROXY protocol version 2, and `` indicates disabling the PROXY protocol. If the PROXY protocol is enabled on TiProxy, you need to also enable the [PROXY protocol](/tidb-configuration-file.md#proxy-protocol) on the TiDB server. ++ Possible values: `""`, `"v2"` ++ Enable the [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) on the port. By enabling the PROXY protocol, TiProxy can pass the real client IP address to TiDB. `"v2"` indicates using the PROXY protocol version 2, and `""` indicates disabling the PROXY protocol. If the PROXY protocol is enabled on TiProxy, you need to also enable the [PROXY protocol](/tidb-configuration-file.md#proxy-protocol) on the TiDB server. ### api @@ -102,10 +102,10 @@ Configurations for HTTP gateway. #### `proxy-protocol` -+ Default value: `` ++ Default value: `""` + Support hot-reload: no -+ Possible values: ``, `v2` -+ Enable the [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) on the port. `v2` indicates using the PROXY protocol version 2, and `` indicates disabling the PROXY protocol. ++ Possible values: `""`, `"v2"` ++ Enable the [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) on the port. `"v2"` indicates using the PROXY protocol version 2, and `""` indicates disabling the PROXY protocol. ### balance @@ -146,7 +146,7 @@ Configurations for the load balancing policy of TiProxy. #### `filename` -+ Default value: `` ++ Default value: `""` + Support hot-reload: yes + Log file path. Non empty value will enable logging to file. When TiProxy is deployed with TiUP, the filename is set automatically. From bf556133b0b0376194485a18cd5b20b28f80b842 Mon Sep 17 00:00:00 2001 From: Aolin Date: Wed, 14 Aug 2024 08:54:07 +0800 Subject: [PATCH 05/44] add an example of using AUTO_RANDOM with PRE_SPLIT_REGIONS (#18564) --- auto-random.md | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/auto-random.md b/auto-random.md index 0545fec78139e..b7b0f78b04420 100644 --- a/auto-random.md +++ b/auto-random.md @@ -47,7 +47,7 @@ When you execute an `INSERT` statement: - If you do not explicitly specify the value of the `AUTO_RANDOM` column, TiDB generates a random value and inserts it into the table. ```sql -tidb> CREATE TABLE t (a BIGINT PRIMARY KEY AUTO_RANDOM, b VARCHAR(255)); +tidb> CREATE TABLE t (a BIGINT PRIMARY KEY AUTO_RANDOM, b VARCHAR(255)) /*T! 
PRE_SPLIT_REGIONS=2 */ ; Query OK, 0 rows affected, 1 warning (0.01 sec) tidb> INSERT INTO t(a, b) VALUES (1, 'string'); @@ -76,6 +76,29 @@ tidb> SELECT * FROM t; | 4899916394579099651 | string3 | +---------------------+---------+ 3 rows in set (0.00 sec) + +tidb> SHOW CREATE TABLE t; ++-------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Table | Create Table | ++-------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| t | CREATE TABLE `t` ( + `a` bigint(20) NOT NULL /*T![auto_rand] AUTO_RANDOM(5) */, + `b` varchar(255) DEFAULT NULL, + PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T! PRE_SPLIT_REGIONS=2 */ | ++-------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +1 row in set (0.00 sec) + +tidb> SHOW TABLE t REGIONS; ++-----------+-----------------------------+-----------------------------+-----------+-----------------+---------------------+------------+---------------+------------+----------------------+------------------+------------------------+------------------+ +| REGION_ID | START_KEY | END_KEY | LEADER_ID | LEADER_STORE_ID | PEERS | SCATTERING | WRITTEN_BYTES | READ_BYTES | APPROXIMATE_SIZE(MB) | APPROXIMATE_KEYS | SCHEDULING_CONSTRAINTS | SCHEDULING_STATE | ++-----------+-----------------------------+-----------------------------+-----------+-----------------+---------------------+------------+---------------+------------+----------------------+------------------+------------------------+------------------+ +| 62798 | t_158_ | t_158_r_2305843009213693952 | 62810 | 28 | 62811, 62812, 62810 | 0 | 151 | 0 | 1 | 0 | | | +| 62802 | t_158_r_2305843009213693952 | t_158_r_4611686018427387904 | 62803 | 1 | 62803, 62804, 62805 | 0 | 39 | 0 | 1 | 0 | | | +| 62806 | t_158_r_4611686018427387904 | t_158_r_6917529027641081856 | 62813 | 4 | 62813, 62814, 62815 | 0 | 160 | 0 | 1 | 0 | | | +| 9289 | t_158_r_6917529027641081856 | 78000000 | 48268 | 1 | 48268, 58951, 62791 | 0 | 10628 | 43639 | 2 | 7999 | | | ++-----------+-----------------------------+-----------------------------+-----------+-----------------+---------------------+------------+---------------+------------+----------------------+------------------+------------------------+------------------+ +4 rows in set (0.00 sec) ``` The `AUTO_RANDOM(S, R)` column value automatically assigned by TiDB has a total of 64 bits: @@ -101,6 +124,7 @@ The structure of an `AUTO_RANDOM` value without a signed bit is as follows: - The content of the shard bits is obtained by calculating the hash value of the starting time of the current transaction. To use a different length of shard bits (such as 10), you can specify `AUTO_RANDOM(10)` when creating the table. - The value of the auto-increment bits is stored in the storage engine and allocated sequentially. Each time a new value is allocated, the value is incremented by 1. 
The auto-increment bits ensure that the values of `AUTO_RANDOM` are unique globally. When the auto-increment bits are exhausted, an error `Failed to read auto-increment value from storage engine` is reported when the value is allocated again. - Value range: the maximum number of bits for the final generated value = shard bits + auto-increment bits. The range of a signed column is `[-(2^(R-1))+1, (2^(R-1))-1]`, and the range of an unsigned column is `[0, (2^R)-1]`. +- You can use `AUTO_RANDOM` with `PRE_SPLIT_REGIONS`. When a table is created successfully, `PRE_SPLIT_REGIONS` pre-splits data in the table into the number of Regions as specified by `2^(PRE_SPLIT_REGIONS)`. > **Note:** > From ab98ce6dc3901756c968b4d22e0d65ee1dfb8882 Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Wed, 14 Aug 2024 10:06:07 +0800 Subject: [PATCH 06/44] sys var: add global scope for `tidb_low_resolution_tso` (#18445) --- system-variables.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/system-variables.md b/system-variables.md index 27d4fec816dbc..2c022927b427f 100644 --- a/system-variables.md +++ b/system-variables.md @@ -3464,12 +3464,13 @@ For a system upgraded to v5.0 from an earlier version, if you have not modified ### tidb_low_resolution_tso -- Scope: SESSION +- Scope: SESSION | GLOBAL - Applies to hint [SET_VAR](/optimizer-hints.md#set_varvar_namevar_value): No - Type: Boolean - Default value: `OFF` - This variable is used to set whether to enable the low-precision TSO feature. After this feature is enabled, TiDB uses the cached timestamp to read data. The cached timestamp is updated every 2 seconds by default. Starting from v8.0.0, you can configure the update interval by [`tidb_low_resolution_tso_update_interval`](#tidb_low_resolution_tso_update_interval-new-in-v800). - The main applicable scenario is to reduce the overhead of acquiring TSO for small read-only transactions when reading old data is acceptable. +- Starting from v8.3.0, this variable supports the GLOBAL scope. ### `tidb_low_resolution_tso_update_interval` New in v8.0.0 From 5bf6008c3b8daacf0211b4e222a3676f6d67af00 Mon Sep 17 00:00:00 2001 From: Aolin Date: Wed, 14 Aug 2024 11:16:37 +0800 Subject: [PATCH 07/44] update pre_split_regions usage scenario (#18563) --- sql-statements/sql-statement-split-region.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sql-statements/sql-statement-split-region.md b/sql-statements/sql-statement-split-region.md index c5a10cdacb017..9cad8f7116967 100644 --- a/sql-statements/sql-statement-split-region.md +++ b/sql-statements/sql-statement-split-region.md @@ -355,11 +355,11 @@ You can specify the partition to be split. ## pre_split_regions -To have evenly split Regions when a table is created, it is recommended you use `SHARD_ROW_ID_BITS` together with `PRE_SPLIT_REGIONS`. When a table is created successfully, `PRE_SPLIT_REGIONS` pre-spilts tables into the number of Regions as specified by `2^(PRE_SPLIT_REGIONS)`. +When creating a table with the `AUTO_RANDOM` or `SHARD_ROW_ID_BITS` attribute, you can also specify the `PRE_SPLIT_REGIONS` option if you want to evenly pre-split the table into Regions immediately after the table is created. The number of pre-split Regions for a table is `2^(PRE_SPLIT_REGIONS)`. > **Note:** > -> The value of `PRE_SPLIT_REGIONS` must be less than or equal to that of `SHARD_ROW_ID_BITS`. +> The value of `PRE_SPLIT_REGIONS` must be less than or equal to that of `SHARD_ROW_ID_BITS` or `AUTO_RANDOM`. 
The `tidb_scatter_region` global variable affects the behavior of `PRE_SPLIT_REGIONS`. This variable controls whether to wait for Regions to be pre-split and scattered before returning results after the table creation. If there are intensive writes after creating the table, you need to set the value of this variable to `1`, then TiDB will not return the results to the client until all the Regions are split and scattered. Otherwise, TiDB writes the data before the scattering is completed, which will have a significant impact on write performance. @@ -384,7 +384,7 @@ region4: [ 3<<61 , +inf ) > **Note:** > -> The Region split by the Split Region statement is controlled by the [Region merge](/best-practices/pd-scheduling-best-practices.md#region-merge) scheduler in PD. To avoid PD re-merging the newly split Region soon after, you need to [dynamically modify](/pd-control.md) configuration items related to the Region merge feature. +> The Region split by the Split Region statement is controlled by the [Region merge](/best-practices/pd-scheduling-best-practices.md#region-merge) scheduler in PD. To avoid PD re-merging the newly split Region soon after, you need to use [table attributes](/table-attributes.md) or [dynamically modify](/pd-control.md) configuration items related to the Region merge feature. From d7794c218b6ea0bec9b33dc9793f080b1663d91c Mon Sep 17 00:00:00 2001 From: Grace Cai Date: Wed, 14 Aug 2024 11:22:36 +0800 Subject: [PATCH 08/44] chore: fix a bug that caused delays in fetching duplicate release notes (#18571) --- ...ase_notes_update_pr_author_info_add_dup.py | 35 ++++++++++++------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/scripts/release_notes_update_pr_author_info_add_dup.py b/scripts/release_notes_update_pr_author_info_add_dup.py index 73d9f862c6ed1..73867d1e596d9 100644 --- a/scripts/release_notes_update_pr_author_info_add_dup.py +++ b/scripts/release_notes_update_pr_author_info_add_dup.py @@ -70,7 +70,7 @@ def store_exst_rn(ext_path, version): else: return 0 -def get_pr_info_from_github(cp_pr_link,cp_pr_title, current_pr_author): +def get_pr_info_from_github(row_number, cp_pr_link,cp_pr_title, current_pr_author): g = Github(access_token, timeout=30)# Create a Github object with the access token target_pr_number_existence = 1 @@ -103,9 +103,10 @@ def get_pr_info_from_github(cp_pr_link,cp_pr_title, current_pr_author): pr_obj = repo_obj.get_pull(int(target_pr_number))# Get the pull request object pr_author = pr_obj.user.login # Get the author of the pull request except: - print("Failed to get the original PR information for this PR: " + cp_pr_link) + print(f"Row {row_number}: failed to find the non-bot author for this PR ({cp_pr_link}) created by {current_pr_author}.\n") else: pr_author = current_pr_author # Use the current author if the cherry-pick PR cannot be found + print(f"Row {row_number}: failed to find the non-bot author for this PR ({cp_pr_link}) created by {current_pr_author}.\n") return(pr_author) @@ -135,14 +136,24 @@ def update_pr_author_and_release_notes(excel_path): # If pr_author is ti-chi-bot or ti-srebot current_pr_author = row[pr_author_index] current_formated_rn= row[pr_formated_rn_index] - pr_response = requests.get(row[pr_link_index]) - if (current_pr_author in ['ti-chi-bot', 'ti-srebot']) and (pr_response.status_code == 200): - print ("Replacing the author info for row " + str(row_index) + ".") - actual_pr_author = get_pr_info_from_github(row[pr_link_index], row[pr_title_index], current_pr_author) # Get the PR author according to 
the cherry-pick PR - pr_author_cell = sheet.cell(row=row_index, column=pr_author_index+1, value = actual_pr_author)#Fill in the pr_author_cell - updated_formated_rn = current_formated_rn.replace("[{}](https://github.com/{}".format(current_pr_author, current_pr_author),"[{}](https://github.com/{}".format(actual_pr_author, actual_pr_author)) - formated_release_note_cell = sheet.cell(row=row_index, column=pr_formated_rn_index+1, value = updated_formated_rn) # Fill in the formated_release_note_cell - current_pr_author = actual_pr_author + + if (current_pr_author in ['ti-chi-bot', 'ti-srebot']): + try: + actual_pr_author = get_pr_info_from_github(str(row_index), row[pr_link_index], row[pr_title_index], current_pr_author) # Get the PR author according to the cherry-pick PR + if actual_pr_author != current_pr_author: + print ("Replacing the author info for row " + str(row_index) + ".") + pr_author_cell = sheet.cell(row=row_index, column=pr_author_index+1, value = actual_pr_author)#Fill in the pr_author_cell + updated_formated_rn = current_formated_rn.replace("[{}](https://github.com/{}".format(current_pr_author, current_pr_author),"[{}](https://github.com/{}".format(actual_pr_author, actual_pr_author)) + formated_release_note_cell = sheet.cell(row=row_index, column=pr_formated_rn_index+1, value = updated_formated_rn) # Fill in the formated_release_note_cell + current_pr_author = actual_pr_author + else: # Do nothing if non-bot author is not found. + pass + except: + pr_response = requests.get(row[pr_link_index]) + if pr_response.status_code != 200: + print (f"\nRow {str(row_index)}: failed to find the non-bot author for this PR ({row[pr_link_index]}) because this link cannot be accessed now.") + else: + print (f"\nRow {str(row_index)}: failed to find the non-bot author for this PR ({row[pr_link_index]}).") else: pass @@ -232,12 +243,12 @@ def create_release_file(version, dup_notes_levels, dup_notes): file.seek(0) file.write(content) file.truncate() - print(f'The v{version} release note is now created in the following directory: \n {release_file}') + print(f'\nThe v{version} release note is now created in the following directory: \n {release_file}') if __name__ == '__main__': note_pairs = store_exst_rn(ext_path, version) dup_notes, dup_notes_levels = update_pr_author_and_release_notes(release_note_excel) - print ("The bot author info in the excel is now replaced with the actual authors.") + print ("\nThe bot author info in the excel is now replaced with the actual authors.") version_parts = version.split('.') if len(version_parts) >= 2: create_release_file(version, dup_notes_levels, dup_notes) \ No newline at end of file From 57a8b03c6724b08bf98a67824582b37a32543a39 Mon Sep 17 00:00:00 2001 From: dongmen <20351731+asddongmen@users.noreply.github.com> Date: Wed, 14 Aug 2024 15:24:09 +0800 Subject: [PATCH 09/44] make bdr ddl replicate GA en (#18568) --- sql-statements/sql-statement-admin-bdr-role.md | 4 ---- ticdc/ticdc-bidirectional-replication.md | 10 ++++------ 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/sql-statements/sql-statement-admin-bdr-role.md b/sql-statements/sql-statement-admin-bdr-role.md index 4781794542dfa..d13e6400ee108 100644 --- a/sql-statements/sql-statement-admin-bdr-role.md +++ b/sql-statements/sql-statement-admin-bdr-role.md @@ -9,10 +9,6 @@ summary: An overview of the usage of ADMIN [SET|SHOW|UNSET] BDR ROLE for the TiD - Use `ADMIN SHOW BDR ROLE` to show the BDR role of the cluster. - Use `ADMIN UNSET BDR ROLE` to unset the BDR role of the cluster. 
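+
+For example, a possible sequence on the cluster that takes the `PRIMARY` role might look as follows (run `ADMIN SET BDR ROLE SECONDARY` on the other cluster). This is an illustrative sketch rather than a required workflow:
+
+```sql
+ADMIN SET BDR ROLE PRIMARY;   -- set the BDR role of this cluster to PRIMARY
+ADMIN SHOW BDR ROLE;          -- check the current BDR role
+ADMIN UNSET BDR ROLE;         -- clear the BDR role when BDR is no longer needed
+```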
-> **Warning:** -> -> This feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed or removed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - ## Synopsis ```ebnf+diagram diff --git a/ticdc/ticdc-bidirectional-replication.md b/ticdc/ticdc-bidirectional-replication.md index 607f23dda2aae..09d5252fc00fa 100644 --- a/ticdc/ticdc-bidirectional-replication.md +++ b/ticdc/ticdc-bidirectional-replication.md @@ -89,14 +89,12 @@ Non-replicable DDLs include: To solve the problem of replicable DDLs and non-replicable DDLs, TiDB introduces the following BDR roles: -- `PRIMARY`: you can execute replicable DDLs, but cannot execute non-replicable DDLs. Replicable DDLs will be replicated to the downstream by TiCDC. -- `SECONDARY`: you cannot execute replicable DDLs or non-replicable DDLs, but can execute the DDLs replicated by TiCDC. +- `PRIMARY`: You can execute replicable DDLs, but not non-replicable DDLs. Replicable DDLs executed in a PRIMARY cluster will be replicated to the downstream by TiCDC. +- `SECONDARY`: You cannot execute replicable DDLs or non-replicable DDLs. However, DDLs executed in a PRIMARY cluster can be replicated to a SECONDARY cluster by TiCDC. -When no BDR role is set, you can execute any DDL. But after you set `bdr_mode=true` on TiCDC, the executed DDL will not be replicated by TiCDC. +When no BDR role is set, you can execute any DDL. However, the changefeed in BDR mode does not replicate any DDL on that cluster. -> **Warning:** -> -> This feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed or removed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. +In short, in BDR mode, TiCDC only replicates replicable DDLs in the PRIMARY cluster to the downstream. ### Replication scenarios of replicable DDLs From ba0cddf4faf6177d11bd925d72d33cdfc3eeb68d Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Wed, 14 Aug 2024 15:51:08 +0800 Subject: [PATCH 10/44] update doc related to fast-create (#18517) --- accelerated-table-creation.md | 20 ++------------------ upgrade-tidb-using-tiup.md | 1 + 2 files changed, 3 insertions(+), 18 deletions(-) diff --git a/accelerated-table-creation.md b/accelerated-table-creation.md index f97eec9d5cfd0..ec3ee720f825c 100644 --- a/accelerated-table-creation.md +++ b/accelerated-table-creation.md @@ -8,9 +8,9 @@ aliases: ['/tidb/dev/ddl-v2/'] TiDB v7.6.0 introduces the system variable [`tidb_ddl_version`](https://docs.pingcap.com/tidb/v7.6/system-variables#tidb_enable_fast_create_table-new-in-v800) to support accelerating table creation, which improves the efficiency of bulk table creation. Starting from v8.0.0, this system variable is renamed to [`tidb_enable_fast_create_table`](/system-variables.md#tidb_enable_fast_create_table-new-in-v800). -TiDB uses the online asynchronous schema change algorithm to change the metadata. All DDL jobs are submitted to the `mysql.tidb_ddl_job` table, and the owner node pulls the DDL job to execute. After executing each phase of the online DDL algorithm, the DDL job is marked as completed and moved to the `mysql.tidb_ddl_history` table. Therefore, DDL statements can only be executed on the owner node and cannot be linearly extended. 
+When accelerated table creation is enabled via [`tidb_enable_fast_create_table`](/system-variables.md#tidb_enable_fast_create_table-new-in-v800), table creation statements with the same schema committed to the same TiDB node at the same time are merged into batch table creation statements to improve table creation performance. Therefore, to improve the table creation performance, try to connect to the same TiDB node, create tables with the same schema concurrently, and increase the concurrency appropriately. -However, for some DDL statements, it is not necessary to strictly follow the online DDL algorithm. For example, the `CREATE TABLE` statement only has two states for the job: `none` and `public`. Therefore, TiDB can simplify the execution process of DDL, and executes the `CREATE TABLE` statement on a non-owner node to accelerate table creation. +The merged batch table creation statements are executed within the same transaction, so if one statement of them fails, all of them will fail. > **Warning:** > @@ -39,19 +39,3 @@ To disable performance optimization for creating tables, set the value of this v ```sql SET GLOBAL tidb_enable_fast_create_table = OFF; ``` - -## Implementation principle - -The detailed implementation principle of performance optimization for table creation is as follows: - -1. Create a `CREATE TABLE` Job. - - The corresponding DDL Job is generated by parsing the `CREATE TABLE` statement. - -2. Execute the `CREATE TABLE` job. - - The TiDB node that receives the `CREATE TABLE` statement executes it directly, and then persists the table structure to TiKV. At the same time, the `CREATE TABLE` job is marked as completed and inserted into the `mysql.tidb_ddl_history` table. - -3. Synchronize the table information. - - TiDB notifies other nodes to synchronize the newly created table structure. diff --git a/upgrade-tidb-using-tiup.md b/upgrade-tidb-using-tiup.md index 66e8a904ea159..b3069dc80abd0 100644 --- a/upgrade-tidb-using-tiup.md +++ b/upgrade-tidb-using-tiup.md @@ -21,6 +21,7 @@ This document is targeted for the following upgrade paths: > 3. **DO NOT** upgrade a TiDB cluster when a DDL statement is being executed in the cluster (usually for the time-consuming DDL statements such as `ADD INDEX` and the column type changes). Before the upgrade, it is recommended to use the [`ADMIN SHOW DDL`](/sql-statements/sql-statement-admin-show-ddl.md) command to check whether the TiDB cluster has an ongoing DDL job. If the cluster has a DDL job, to upgrade the cluster, wait until the DDL execution is finished or use the [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) command to cancel the DDL job before you upgrade the cluster. > 4. If the TiDB version before upgrade is 7.1.0 or later, you can ignore the preceding warnings 2 and 3. For more information, see [limitations on using TiDB smooth upgrade](/smooth-upgrade-tidb.md#limitations). > 5. Be sure to read [limitations on user operations](/smooth-upgrade-tidb.md#limitations-on-user-operations) before upgrading your TiDB cluster using TiUP. +> 6. If the version of your current TiDB cluster is TiDB v7.6.0 to v8.2.0, the upgrade target version is v8.3.0 or later, and accelerated table creation is enabled via [`tidb_enable_fast_create_table`](/system-variables.md#tidb_enable_fast_create_table-new-in-v800), you need to first disable the accelerated table creation feature, and then enable it as needed after the upgrade is completed. Otherwise, some metadata KVs added by this feature remain in the cluster. 
Starting from v8.3.0, this feature is optimized. Upgrading to a later TiDB version no longer generates and retains this type of metadata KVs. > **Note:** > From 7c9d10563597c0e2d2c6a333c60cee8eb42bfcc4 Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Wed, 14 Aug 2024 16:06:07 +0800 Subject: [PATCH 11/44] sys-var: add SESSION scope for `ddl_reorg_batch_size` and `ddl_reorg_worker_cnt` (#18585) --- system-variables.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system-variables.md b/system-variables.md index 2c022927b427f..2d1357acc582a 100644 --- a/system-variables.md +++ b/system-variables.md @@ -1670,7 +1670,7 @@ mysql> SELECT job_info FROM mysql.analyze_jobs ORDER BY end_time DESC LIMIT 1; > > This variable is read-only for [TiDB Serverless](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-serverless). -- Scope: GLOBAL +- Scope: SESSION | GLOBAL - Persists to cluster: Yes - Applies to hint [SET_VAR](/optimizer-hints.md#set_varvar_namevar_value): No - Type: Integer @@ -1701,7 +1701,7 @@ mysql> SELECT job_info FROM mysql.analyze_jobs ORDER BY end_time DESC LIMIT 1; > > This variable is read-only for [TiDB Serverless](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-serverless). -- Scope: GLOBAL +- Scope: SESSION | GLOBAL - Persists to cluster: Yes - Applies to hint [SET_VAR](/optimizer-hints.md#set_varvar_namevar_value): No - Type: Integer From 7e070f2eff5308e10351d653845391c5251a066e Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Wed, 14 Aug 2024 16:28:38 +0800 Subject: [PATCH 12/44] Update the log redaction documentation (#18487) --- log-redaction.md | 40 +++++++++++++++++++++++++------- pd-configuration-file.md | 3 ++- tiflash/tiflash-configuration.md | 17 ++++++++------ tikv-configuration-file.md | 6 ++++- 4 files changed, 48 insertions(+), 18 deletions(-) diff --git a/log-redaction.md b/log-redaction.md index 04c8ff7965de9..61108e93af67a 100644 --- a/log-redaction.md +++ b/log-redaction.md @@ -9,14 +9,12 @@ When TiDB provides detailed log information, it might print sensitive data (for ## Log redaction in TiDB side -To enable log redaction in the TiDB side, set the value of [`global.tidb_redact_log`](/system-variables.md#tidb_redact_log) to `1`. This configuration value defaults to `0`, which means that log redaction is disabled. +To enable log redaction in the TiDB side, set the value of [`global.tidb_redact_log`](/system-variables.md#tidb_redact_log) to `ON` or `MARKER`. This configuration value defaults to `OFF`, which means that log redaction is disabled. You can use the `set` syntax to set the global variable `tidb_redact_log`: -{{< copyable "sql" >}} - ```sql -set @@global.tidb_redact_log=1; +set @@global.tidb_redact_log = ON; ``` After the setting, all logs generated in new sessions are redacted: @@ -32,19 +30,43 @@ ERROR 1062 (23000): Duplicate entry '1' for key 't.a' The error log for the `INSERT` statement above is printed as follows: ``` -[2020/10/20 11:45:49.539 +08:00] [INFO] [conn.go:800] ["command dispatched failed"] [conn=5] [connInfo="id:5, addr:127.0.0.1:57222 status:10, collation:utf8_general_ci, user:root"] [command=Query] [status="inTxn:0, autocommit:1"] [sql="insert into t values ( ? ) , ( ? )"] [txn_mode=OPTIMISTIC] [err="[kv:1062]Duplicate entry '?' 
for key 't.a'"] +[2024/07/02 11:35:32.686 +08:00] [INFO] [conn.go:1146] ["command dispatched failed"] [conn=1482686470] [session_alias=] [connInfo="id:1482686470, addr:127.0.0.1:52258 status:10, collation:utf8mb4_0900_ai_ci, user:root"] [command=Query] [status="inTxn:0, autocommit:1"] [sql="insert into `t` values ( ... )"] [txn_mode=PESSIMISTIC] [timestamp=450859193514065921] [err="[kv:1062]Duplicate entry '?' for key 't.a'"] +``` + +From the preceding error log, you can see that when the value of `tidb_redact_log` is set to `ON`, sensitive information is replaced by the `?` mark in the TiDB log to avoid data security risks. + +In addition, TiDB provides the `MARKER` option. When the value of `tidb_redact_log` is set to `MARKER`, TiDB marks sensitive information in the log with `‹›` instead of replacing it directly, so you can customize the redaction rules. + +```sql +set @@global.tidb_redact_log = MARKER; +``` + +After the preceding configuration, the sensitive information is marked rather than replaced in all logs generated by new sessions: + +```sql +create table t (a int, unique key (a)); +Query OK, 0 rows affected (0.07 sec) + +insert into t values (1),(1); +ERROR 1062 (23000): Duplicate entry '‹1›' for key 't.a' +``` + +The error log is as follows: + +``` +[2024/07/02 11:35:01.426 +08:00] [INFO] [conn.go:1146] ["command dispatched failed"] [conn=1482686470] [session_alias=] [connInfo="id:1482686470, addr:127.0.0.1:52258 status:10, collation:utf8mb4_0900_ai_ci, user:root"] [command=Query] [status="inTxn:0, autocommit:1"] [sql="insert into `t` values ( ‹1› ) , ( ‹1› )"] [txn_mode=PESSIMISTIC] [timestamp=450859185309483010] [err="[kv:1062]Duplicate entry '‹1›' for key 't.a'"] ``` -From the error log above, you can see that all sensitive information is shielded using `?` after `tidb_redact_log` is enabled. In this way, data security risks are avoided. +As you can see from the preceding error log, after you set `tidb_redact_log` to `MARKER`, TiDB marks sensitive information using `‹ ›` in the log. You can customize redaction rules to handle sensitive information in the log as needed. ## Log redaction in TiKV side -To enable log redaction in the TiKV side, set the value of [`security.redact-info-log`](/tikv-configuration-file.md#redact-info-log-new-in-v408) to `true`. This configuration value defaults to `false`, which means that log redaction is disabled. +To enable log redaction in the TiKV side, set the value of [`security.redact-info-log`](/tikv-configuration-file.md#redact-info-log-new-in-v408) to `true` or `"marker"`. This configuration value defaults to `false`, which means that log redaction is disabled. ## Log redaction in PD side -To enable log redaction in the PD side, set the value of [`security.redact-info-log`](/pd-configuration-file.md#redact-info-log-new-in-v50) to `true`. This configuration value defaults to `false`, which means that log redaction is disabled. +To enable log redaction in the PD side, set the value of [`security.redact-info-log`](/pd-configuration-file.md#redact-info-log-new-in-v50) to `true` or `"marker"`. This configuration value defaults to `false`, which means that log redaction is disabled. ## Log redaction in TiFlash side -To enable log redaction in the TiFlash side, set both the [`security.redact_info_log`](/tiflash/tiflash-configuration.md#configure-the-tiflashtoml-file) value in tiflash-server and the [`security.redact-info-log`](/tiflash/tiflash-configuration.md#configure-the-tiflash-learnertoml-file) value in tiflash-learner to `true`. 
Both configuration values default to `false`, which means that log redaction is disabled. +To enable log redaction in the TiFlash side, set both the [`security.redact_info_log`](/tiflash/tiflash-configuration.md#configure-the-tiflashtoml-file) value in tiflash-server and the [`security.redact-info-log`](/tiflash/tiflash-configuration.md#configure-the-tiflash-learnertoml-file) value in tiflash-learner to `true` or `"marker"`. Both configuration values default to `false`, which means that log redaction is disabled. diff --git a/pd-configuration-file.md b/pd-configuration-file.md index f727ce33ba9b5..eebe1e5420430 100644 --- a/pd-configuration-file.md +++ b/pd-configuration-file.md @@ -198,8 +198,9 @@ Configuration items related to security ### `redact-info-log` New in v5.0 + Controls whether to enable log redaction in the PD log -+ When you set the configuration value to `true`, user data is redacted in the PD log. ++ Optional value: `false`, `true`, `"marker"` + Default value: `false` ++ For details on how to use it, see [Log redaction in PD side](/log-redaction.md#log-redaction-in-pd-side). ## `log` diff --git a/tiflash/tiflash-configuration.md b/tiflash/tiflash-configuration.md index e8c383b6e70d3..78f036d1d2d63 100644 --- a/tiflash/tiflash-configuration.md +++ b/tiflash/tiflash-configuration.md @@ -256,10 +256,11 @@ delta_index_cache_size = 0 ## Security settings take effect starting from v4.0.5. [security] - ## New in v5.0. This configuration item enables or disables log redaction. Value options: true, false, marker. The marker option is introduced in v8.2.0. - ## The default value is false, which means that log redaction is disabled. - ## If the configuration item is set to true, all user data in the log is replaced by `?`. - ## If the configuration item is set to marker, all user data in the log is wrapped in `‹ ›`. If user data contains `‹` or `›`, `‹` is escaped as `‹‹`, and `›` is escaped as `››`. Based on the marked logs, you can decide whether to desensitize the marked information when the logs are displayed. + ## New in v5.0. This configuration item enables or disables log redaction. Value options: `true`, `false`, `"on"`, `"off"`, and `"marker"`. The `"on"`, `"off"`, and `"marker"` options are introduced in v8.2.0. + ## If the configuration item is set to `false` or `"off"`, log redaction is disabled. + ## If the configuration item is set to `true` or `"on"`, all user data in the log is replaced by `?`. + ## If the configuration item is set to `"marker"`, all user data in the log is wrapped in `‹ ›`. If user data contains `‹` or `›`, `‹` is escaped as `‹‹`, and `›` is escaped as `››`. Based on the marked logs, you can decide whether to desensitize the marked information when the logs are displayed. + ## The default value is `false`. ## Note that you also need to set security.redact-info-log for tiflash-learner's logging in tiflash-learner.toml. # redact_info_log = false @@ -307,9 +308,11 @@ The parameters in `tiflash-learner.toml` are basically the same as those in TiKV snap-handle-pool-size = 2 [security] - ## New in v5.0. This configuration item enables or disables log redaction. Value options: true, false. - ## The default value is false, which means that log redaction is disabled. - ## If the configuration item is set to true, all user data in the log is replaced by `?`. + ## New in v5.0. This configuration item enables or disables log redaction. Value options: `true`, `false`, `"on"`, `"off"`, and `"marker"`. 
The `"on"`, `"off"`, and `"marker"` options are introduced in v8.3.0. + ## If the configuration item is set to `false` or `"off"`, log redaction is disabled. + ## If the configuration item is set to `true` or `"on"`, all user data in the log is replaced by `?`. + ## If the configuration item is set to `"marker"`, all user data in the log is wrapped in `‹ ›`. If user data contains `‹` or `›`, `‹` is escaped as `‹‹`, and `›` is escaped as `››`. Based on the marked logs, you can decide whether to desensitize the marked information when the logs are displayed. + ## The default value is `false`. redact-info-log = false [security.encryption] diff --git a/tikv-configuration-file.md b/tikv-configuration-file.md index 8c96e8528b37c..f07fdd1ca6c08 100644 --- a/tikv-configuration-file.md +++ b/tikv-configuration-file.md @@ -2041,8 +2041,12 @@ Configuration items related to security. ### `redact-info-log` New in v4.0.8 -+ This configuration item enables or disables log redaction. If the configuration value is set to `true`, all user data in the log will be replaced by `?`. ++ This configuration item enables or disables log redaction. Value options: `true`, `false`, `"on"`, `"off"`, and `"marker"`. The `"on"`, `"off"`, and `"marker"` options are introduced in v8.3.0. ++ If the configuration item is set to `false` or `"off"`, log redaction is disabled. ++ If the configuration item is set to `true` or `"on"`, all user data in the log is replaced by `?`. ++ If the configuration item is set to `"marker"`, all user data in the log is wrapped in `‹ ›`. If user data contains `‹` or `›`, `‹` is escaped as `‹‹`, and `›` is escaped as `››`. Based on the marked logs, you can decide whether to desensitize the marked information when the logs are displayed. + Default value: `false` ++ For details on how to use it, see [Log redaction in TiKV side](/log-redaction.md#log-redaction-in-tikv-side). ## security.encryption From cdfa6bf5b43367ffc56e79a00e7104832160bd94 Mon Sep 17 00:00:00 2001 From: Aolin Date: Thu, 15 Aug 2024 11:32:40 +0800 Subject: [PATCH 13/44] remove ambiguous TTL from glossary (#18588) --- glossary.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/glossary.md b/glossary.md index 39b96d20f7288..5b404be6358cd 100644 --- a/glossary.md +++ b/glossary.md @@ -173,7 +173,3 @@ Top SQL helps locate SQL queries that contribute to a high load of a TiDB or TiK ### TSO Because TiKV is a distributed storage system, it requires a global timing service, Timestamp Oracle (TSO), to assign a monotonically increasing timestamp. In TiKV, such a feature is provided by PD, and in Google [Spanner](http://static.googleusercontent.com/media/research.google.com/en//archive/spanner-osdi2012.pdf), this feature is provided by multiple atomic clocks and GPS. - -### TTL - -[Time to live (TTL)](/time-to-live.md) is a feature that allows you to manage TiDB data lifetime at the row level. For a table with the TTL attribute, TiDB automatically checks data lifetime and deletes expired data at the row level. 
From 567b0b37481467a399e80c5df52bdd47513aa04b Mon Sep 17 00:00:00 2001 From: Aolin Date: Thu, 15 Aug 2024 11:37:10 +0800 Subject: [PATCH 14/44] cdc: canal-json dml event commit-ts should be TSO (#18591) --- ticdc/ticdc-canal-json.md | 6 +++--- ticdc/ticdc-sink-to-kafka.md | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ticdc/ticdc-canal-json.md b/ticdc/ticdc-canal-json.md index c7c076298396e..c7ee505c3d922 100644 --- a/ticdc/ticdc-canal-json.md +++ b/ticdc/ticdc-canal-json.md @@ -64,7 +64,7 @@ TiCDC encodes a DDL Event into the following Canal-JSON format. "data": null, "old": null, "_tidb": { // TiDB extension field - "commitTs": 163963309467037594 + "commitTs": 429918007904436226 // A TiDB TSO timestamp } } ``` @@ -133,7 +133,7 @@ TiCDC encodes a row of DML data change event as follows: ], "old": null, "_tidb": { // TiDB extension field - "commitTs": 163963314122145239 + "commitTs": 429918007904436226 // A TiDB TSO timestamp } } ``` @@ -162,7 +162,7 @@ The following is an example of the WATERMARK Event. "data": null, "old": null, "_tidb": { // TiDB extension field - "watermarkTs": 429918007904436226 + "watermarkTs": 429918007904436226 // A TiDB TSO timestamp } } ``` diff --git a/ticdc/ticdc-sink-to-kafka.md b/ticdc/ticdc-sink-to-kafka.md index bbdebf2ca8d83..691d14a29c672 100644 --- a/ticdc/ticdc-sink-to-kafka.md +++ b/ticdc/ticdc-sink-to-kafka.md @@ -450,7 +450,7 @@ The message format with handle keys only is as follows: ], "old": null, "_tidb": { // TiDB extension fields - "commitTs": 163963314122145239, + "commitTs": 429918007904436226, // A TiDB TSO timestamp "onlyHandleKey": true } } @@ -516,7 +516,7 @@ The Kafka consumer receives a message that contains the address of the large mes ], "old": null, "_tidb": { // TiDB extension fields - "commitTs": 163963314122145239, + "commitTs": 429918007904436226, // A TiDB TSO timestamp "claimCheckLocation": "s3:/claim-check-bucket/${uuid}.json" } } From 3af3dfdcc1649d96141006700735188f74ec37ce Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Thu, 15 Aug 2024 13:49:10 +0800 Subject: [PATCH 15/44] sys var: add `tidb_enable_shared_lock_upgrade` (#18447) --- pessimistic-transaction.md | 4 +++- system-variables.md | 10 ++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/pessimistic-transaction.md b/pessimistic-transaction.md index b759d2af95854..b2bfc808f7f7c 100644 --- a/pessimistic-transaction.md +++ b/pessimistic-transaction.md @@ -111,7 +111,9 @@ Pessimistic transactions in TiDB behave similarly to those in MySQL. See the min 2. TiDB does not support `SELECT LOCK IN SHARE MODE`. - When `SELECT LOCK IN SHARE MODE` is executed, it has the same effect as that without the lock, so the read or write operation of other transactions is not blocked. + TiDB does not support the `SELECT LOCK IN SHARE MODE` syntax by default. You can enable [`tidb_enable_noop_functions`](/system-variables.md#tidb_enable_noop_functions-new-in-v40) to make TiDB compatible with the `SELECT LOCK IN SHARE MODE` syntax. Executing `SELECT LOCK IN SHARE MODE` has the same effect as that without the lock, so it does not block read or write operations of other transactions. + + Starting from v8.3.0, TiDB supports using the [`tidb_enable_shared_lock_promotion`](/system-variables.md#tidb_enable_shared_lock_promotion-new-in-v830) system variable to enable the `SELECT LOCK IN SHARE MODE` statement to add locks. 
However, note that the lock added at this time is not a true shared lock, but an exclusive lock, which is consistent with `SELECT FOR UPDATE`. If you want to block writes to prevent data from being modified by write transactions in parallel during reads while keeping TiDB compatible with the `SELECT LOCK IN SHARE MODE` syntax, you can enable this variable. Enabling this variable takes effect on the `SELECT LOCK IN SHARE MODE` statement, regardless of whether [`tidb_enable_noop_functions`](/system-variables.md#tidb_enable_noop_functions-new-in-v40) is enabled or not. 3. DDL may result in failure of the pessimistic transaction commit. diff --git a/system-variables.md b/system-variables.md index 2d1357acc582a..3b28ba7193dbe 100644 --- a/system-variables.md +++ b/system-variables.md @@ -2567,6 +2567,16 @@ mysql> SELECT job_info FROM mysql.analyze_jobs ORDER BY end_time DESC LIMIT 1; - Value options: `OFF`, `ON` - This variable controls whether TiDB enables chunk objects cache. If the value is `ON`, TiDB prefers to use the cached chunk object and only requests from the system if the requested object is not in the cache. If the value is `OFF`, TiDB requests chunk objects from the system directly. +### tidb_enable_shared_lock_promotion New in v8.3.0 + +- Scope: SESSION | GLOBAL +- Persists to cluster: Yes +- Applies to hint [SET_VAR](/optimizer-hints.md#set_varvar_namevar_value): No +- Type: Boolean +- Default value: `OFF` +- This variable controls whether to enable the feature of upgrading shared locks to exclusive locks. TiDB does not support `SELECT LOCK IN SHARE MODE` by default. When the variable value is `ON`, TiDB tries to upgrade the `SELECT LOCK IN SHARE MODE` statement to `SELECT FOR UPDATE` and add a pessimistic lock. The default value of this variable is `OFF`, which means that the feature of upgrading shared locks to exclusive locks is disabled. +- Enabling this variable takes effect on the `SELECT LOCK IN SHARE MODE` statement, regardless of whether [`tidb_enable_noop_functions`](/system-variables.md#tidb_enable_noop_functions-new-in-v40) is enabled or not. + ### tidb_enable_slow_log > **Note:** From 676d9c0f88020be751270f68e09c14676bac4fbe Mon Sep 17 00:00:00 2001 From: Rustin <29879298+Rustin170506@users.noreply.github.com> Date: Thu, 15 Aug 2024 14:44:41 +0800 Subject: [PATCH 16/44] tidb: add tidb_analyze_column_options (#18281) --- benchmark/benchmark-tidb-using-ch.md | 7 +++-- statistics.md | 47 +++++++++++----------------- system-variables.md | 18 ++++++++++- tiup/tiup-bench.md | 14 +++++++-- 4 files changed, 51 insertions(+), 35 deletions(-) diff --git a/benchmark/benchmark-tidb-using-ch.md b/benchmark/benchmark-tidb-using-ch.md index a585a5f7d336b..78f5c872cc826 100644 --- a/benchmark/benchmark-tidb-using-ch.md +++ b/benchmark/benchmark-tidb-using-ch.md @@ -7,7 +7,7 @@ summary: Learn how to run CH-benCHmark test on TiDB. This document describes how to test TiDB using CH-benCHmark. -CH-benCHmark is a mixed workload containing both [TPC-C](http://www.tpc.org/tpcc/) and [TPC-H](http://www.tpc.org/tpch/) tests. It is the most common workload to test HTAP systems. For more information, see [The mixed workload CH-benCHmark](https://research.tableau.com/sites/default/files/a8-cole.pdf). +CH-benCHmark is a mixed workload containing both [TPC-C](http://www.tpc.org/tpcc/) and [TPC-H](http://www.tpc.org/tpch/) tests. It is the most common workload to test HTAP systems. For more information, see [The mixed workload CH-benCHmark](https://dl.acm.org/doi/10.1145/1988842.1988850). 
Before running the CH-benCHmark test, you need to deploy [TiFlash](/tiflash/tiflash-overview.md) first, which is a TiDB's HTAP component. After you deploy TiFlash and [create TiFlash replicas](#create-tiflash-replicas), TiKV will replicate the latest data of TPC-C online transactions to TiFlash in real time, and the TiDB optimizer will automatically push down OLAP queries from TPC-H workload to the MPP engine of TiFlash for efficient execution. @@ -85,9 +85,10 @@ In the result of the above statement: ## Collect statistics -To ensure that the TiDB optimizer can generate the optimal execution plan, execute the following SQL statements to collect statistics in advance. +To ensure that the TiDB optimizer can generate the optimal execution plan, execute the following SQL statements to collect statistics in advance. **Be sure to set [`tidb_analyze_column_options`](/system-variables.md#tidb_analyze_column_options-new-in-v830) to `ALL`, otherwise collecting statistics can result in a significant drop in query performance.** ``` +set global tidb_analyze_column_options='ALL'; analyze table customer; analyze table district; analyze table history; @@ -166,4 +167,4 @@ tpmC: 93826.9, efficiency: 729.6% [Summary] Q7 - Count: 11, Sum(ms): 158928.2, Avg(ms): 14446.3 ``` -After the test is finished, you can execute the `tiup bench tpcc -H 172.16.5.140 -P 4000 -D tpcc --warehouses 1000 check` command to validate the data correctness. \ No newline at end of file +After the test is finished, you can execute the `tiup bench tpcc -H 172.16.5.140 -P 4000 -D tpcc --warehouses 1000 check` command to validate the data correctness. diff --git a/statistics.md b/statistics.md index b94055b38bfbe..16f2bd35e0250 100644 --- a/statistics.md +++ b/statistics.md @@ -123,13 +123,13 @@ When `IndexNameList` is empty, this syntax collects statistics on all indexes in > **Note:** > -> To ensure that the statistical information before and after the collection is consistent, when `tidb_analyze_version` is `2`, this syntax collects statistics on the entire table (including all columns and indexes), instead of only on indexes. +> To ensure that the statistical information before and after the collection is consistent, when `tidb_analyze_version` is `2`, this syntax collects statistics on the indexed columns and all indexes. ### Collect statistics on some columns -In most cases, the optimizer only uses statistics on columns in the `WHERE`, `JOIN`, `ORDER BY`, and `GROUP BY` statements. These columns can be referred to as `PREDICATE COLUMNS`. +When TiDB executes SQL statements, the optimizer uses statistics for only a subset of columns in most cases. For example, columns that appear in the `WHERE`, `JOIN`, `ORDER BY`, and `GROUP BY` clauses. These columns are referred to as predicate columns. -If a table has many columns, collecting statistics on all the columns can cause a large overhead. To reduce the overhead, you can collect statistics on only specific columns (that you choose) or `PREDICATE COLUMNS` to be used by the optimizer. To persist the column list of any subset of columns for reuse in future, see [Persist column configurations](#persist-column-configurations). +If a table has many columns, collecting statistics on all the columns can cause a large overhead. To reduce the overhead, you can collect statistics for only specific columns (that you choose) or `PREDICATE COLUMNS` to be used by the optimizer. 
To persist the column list of any subset of columns for reuse in future, see [Persist column configurations](#persist-column-configurations). > **Note:** > @@ -144,38 +144,30 @@ If a table has many columns, collecting statistics on all the columns can cause In the syntax, `ColumnNameList` specifies the name list of the target columns. If you need to specify more than one column, use comma `,` to separate the column names. For example, `ANALYZE table t columns a, b`. Besides collecting statistics on the specific columns in a specific table, this syntax collects statistics on the indexed columns and all indexes in that table at the same time. -- To collect statistics on `PREDICATE COLUMNS`, do the following: +- To collect statistics on `PREDICATE COLUMNS`, use the following syntax: - > **Warning:** - > - > Currently, collecting statistics on `PREDICATE COLUMNS` is an experimental feature. It is not recommended that you use it in production environments. - - 1. Set the value of the [`tidb_enable_column_tracking`](/system-variables.md#tidb_enable_column_tracking-new-in-v540) system variable to `ON` to enable TiDB to collect `PREDICATE COLUMNS`. - - - - After the setting, TiDB writes the `PREDICATE COLUMNS` information to the [`mysql.column_stats_usage`](/mysql-schema/mysql-schema.md#statistics-system-tables) system table every 100 * [`stats-lease`](/tidb-configuration-file.md#stats-lease). + ```sql + ANALYZE TABLE TableName PREDICATE COLUMNS [WITH NUM BUCKETS|TOPN|CMSKETCH DEPTH|CMSKETCH WIDTH]|[WITH NUM SAMPLES|WITH FLOATNUM SAMPLERATE]; + ``` - + - + TiDB always writes the `PREDICATE COLUMNS` information to the [`mysql.column_stats_usage`](/mysql-schema/mysql-schema.md#statistics-system-tables) system table every 100 * [`stats-lease`](/tidb-configuration-file.md#stats-lease). - After the setting, TiDB writes the `PREDICATE COLUMNS` information to the [`mysql.column_stats_usage`](/mysql-schema/mysql-schema.md#statistics-system-tables) system table every 300 seconds. + - + - 2. After the query pattern of your business is relatively stable, collect statistics on `PREDICATE COLUMNS` by using the following syntax: + TiDB always writes the `PREDICATE COLUMNS` information to the [`mysql.column_stats_usage`](/mysql-schema/mysql-schema.md#statistics-system-tables) system table every 300 seconds. - ```sql - ANALYZE TABLE TableName PREDICATE COLUMNS [WITH NUM BUCKETS|TOPN|CMSKETCH DEPTH|CMSKETCH WIDTH]|[WITH NUM SAMPLES|WITH FLOATNUM SAMPLERATE]; - ``` + - Besides collecting statistics on `PREDICATE COLUMNS` in a specific table, this syntax collects statistics on indexed columns and all indexes in that table at the same time. + In addition to collecting statistics on `PREDICATE COLUMNS` in a specific table, this syntax collects statistics on indexed columns and all indexes in that table at the same time. - > **Note:** - > - > - If the [`mysql.column_stats_usage`](/mysql-schema/mysql-schema.md#statistics-system-tables) system table does not contain any `PREDICATE COLUMNS` recorded for that table, the preceding syntax collects statistics on all columns and all indexes in that table. - > - Any columns excluded from collection (either by manually listing columns or using `PREDICATE COLUMNS`) will not have their statistics overwritten. When executing a new type of SQL query, the optimizer will use the old statistics for such columns if it exists or pseudo column statistics if columns never had statistics collected. The next ANALYZE using `PREDICATE COLUMNS` will collect the statistics on those columns. 
+ > **Note:** + > + > - If the [`mysql.column_stats_usage`](/mysql-schema/mysql-schema.md#statistics-system-tables) system table does not contain any `PREDICATE COLUMNS` recorded for that table, the preceding syntax collects statistics on indexed columns and all indexes in that table. + > - Any columns excluded from collection (either by manually listing columns or using `PREDICATE COLUMNS`) will not have their statistics overwritten. When executing a new type of SQL query, the optimizer will use the old statistics for such columns if it exists, or pseudo column statistics if columns never had statistics collected. The next ANALYZE using `PREDICATE COLUMNS` will collect the statistics on those columns. - To collect statistics on all columns and indexes, use the following syntax: @@ -329,9 +321,6 @@ To locate `PREDICATE COLUMNS` and columns on which statistics have been collecte In the following example, after executing `ANALYZE TABLE t PREDICATE COLUMNS;`, TiDB collects statistics on columns `b`, `c`, and `d`, where column `b` is a `PREDICATE COLUMN` and columns `c` and `d` are index columns. ```sql -SET GLOBAL tidb_enable_column_tracking = ON; -Query OK, 0 rows affected (0.00 sec) - CREATE TABLE t (a INT, b INT, c INT, d INT, INDEX idx_c_d(c, d)); Query OK, 0 rows affected (0.00 sec) diff --git a/system-variables.md b/system-variables.md index 3b28ba7193dbe..63ad87e2df4ef 100644 --- a/system-variables.md +++ b/system-variables.md @@ -1112,6 +1112,22 @@ MPP is a distributed computing framework provided by the TiFlash engine, which a - Default value: `OFF` - This variable is used to set whether the `AUTO_INCREMENT` property of a column is allowed to be removed by executing `ALTER TABLE MODIFY` or `ALTER TABLE CHANGE` statements. It is not allowed by default. +### tidb_analyze_column_options New in v8.3.0 + +> **Note:** +> +> - This variable only works when [`tidb_analyze_version`](#tidb_analyze_version-new-in-v510) is set to `2`. +> - If you upgrade your TiDB cluster from a version earlier than v8.3.0 to v8.3.0 or later, this variable is set to `ALL` by default to keep the original behavior. +> - Starting from v8.3.0, for a newly deployed TiDB cluster, this variable is set to `PREDICATE` by default. + +- Scope: GLOBAL +- Persists to cluster: Yes +- Applies to hint [SET_VAR](/optimizer-hints.md#set_varvar_namevar_value): No +- Type: Enumeration +- Default value: `PREDICATE` +- Value options:`ALL`, `PREDICATE` +- This variable controls the behavior of the `ANALYZE TABLE` statement. Setting it to `PREDICATE` means only collecting statistics for [predicate columns](/statistics.md#collect-statistics-on-some-columns); setting it to `ALL` means collecting statistics for all columns. In scenarios where OLAP queries are used, it is recommended to set it to `ALL`, otherwise collecting statistics can result in a significant drop in query performance. + ### tidb_analyze_distsql_scan_concurrency New in v7.6.0 - Scope: SESSION | GLOBAL @@ -2003,7 +2019,7 @@ mysql> SELECT job_info FROM mysql.analyze_jobs ORDER BY end_time DESC LIMIT 1; > **Warning:** > -> Currently, collecting statistics on `PREDICATE COLUMNS` is an experimental feature. It is not recommended that you use it in production environments. +> Starting from v8.3.0, this variable is deprecated. TiDB tracks predicate columns by default. For more information, see [`tidb_analyze_column_options`](#tidb_analyze_column_options-new-in-v830). 
- Scope: GLOBAL - Persists to cluster: Yes diff --git a/tiup/tiup-bench.md b/tiup/tiup-bench.md index c69b483af73eb..a7d631ad43557 100644 --- a/tiup/tiup-bench.md +++ b/tiup/tiup-bench.md @@ -152,7 +152,17 @@ Flags: tiup bench tpch --sf=1 prepare ``` -2. Run the TPC-H test by executing one of the following commands: +2. Collect statistics: + + For the OLAP scenarios, to ensure that the TiDB optimizer can generate the optimal execution plan, execute the following SQL statements to collect statistics in advance. **Be sure to set [`tidb_analyze_column_options`](/system-variables.md#tidb_analyze_column_options-new-in-v830) to `ALL`, otherwise collecting statistics can result in a significant drop in query performance.** + + {{< copyable "shell-regular" >}} + + ```sql + set global tidb_analyze_column_options='ALL'; + ``` + +3. Run the TPC-H test by executing one of the following commands: - If you check the result, run this command: @@ -170,7 +180,7 @@ Flags: tiup bench tpch --count=22 --sf=1 run ``` -3. Clean up data: +4. Clean up data: {{< copyable "shell-regular" >}} From f597bb728ffa4cc0784dcf928255f6e08761992f Mon Sep 17 00:00:00 2001 From: YangKeao Date: Thu, 15 Aug 2024 15:02:41 +0800 Subject: [PATCH 17/44] variable: add document about lazy cursor fetch and its variable (#18329) --- best-practices/java-app-best-practices.md | 12 ++++-- develop/dev-guide-connection-parameters.md | 12 ++++-- system-variables.md | 44 ++++++++++++++++++++++ 3 files changed, 60 insertions(+), 8 deletions(-) diff --git a/best-practices/java-app-best-practices.md b/best-practices/java-app-best-practices.md index f593ea6e02827..24324f44c132f 100644 --- a/best-practices/java-app-best-practices.md +++ b/best-practices/java-app-best-practices.md @@ -62,17 +62,21 @@ For batch inserts, you can use the [`addBatch`/`executeBatch` API](https://www.t In most scenarios, to improve execution efficiency, JDBC obtains query results in advance and save them in client memory by default. But when the query returns a super large result set, the client often wants the database server to reduce the number of records returned at a time, and waits until the client's memory is ready and it requests for the next batch. -Usually, there are two kinds of processing methods in JDBC: +The following two processing methods are usually used in JDBC: -- [Set `FetchSize` to `Integer.MIN_VALUE`](https://dev.mysql.com/doc/connector-j/en/connector-j-reference-implementation-notes.html#ResultSet) to ensure that the client does not cache. The client will read the execution result from the network connection through `StreamingResult`. +- The first method: [Set `FetchSize` to `Integer.MIN_VALUE`](https://dev.mysql.com/doc/connector-j/en/connector-j-reference-implementation-notes.html#ResultSet) to ensure that the client does not cache. The client will read the execution result from the network connection through `StreamingResult`. When the client uses the streaming read method, it needs to finish reading or close `resultset` before continuing to use the statement to make a query. Otherwise, the error `No statements may be issued when any streaming result sets are open and in use on a given connection. Ensure that you have called .close() on any active streaming result sets before attempting more queries.` is returned. To avoid such an error in queries before the client finishes reading or closes `resultset`, you can add the `clobberStreamingResults=true` parameter in the URL. 
Then, `resultset` is automatically closed but the result set to be read in the previous streaming query is lost. -- To use Cursor Fetch, first [set `FetchSize`](http://makejavafaster.blogspot.com/2015/06/jdbc-fetch-size-performance.html) as a positive integer and configure `useCursorFetch=true` in the JDBC URL. +- The second method: Use Cursor Fetch by first [setting `FetchSize`](http://makejavafaster.blogspot.com/2015/06/jdbc-fetch-size-performance.html) as a positive integer and then configuring `useCursorFetch = true` in the JDBC URL. -TiDB supports both methods, but it is preferred that you use the first method, because it is a simpler implementation and has a better execution efficiency. +TiDB supports both methods, but it is recommended that you use the first method that sets `FetchSize` to `Integer.MIN_VALUE`, because it is a simpler implementation and has better execution efficiency. + +For the second method, TiDB first loads all data to the TiDB node, and then returns data to the client according to the `FetchSize`. Therefore, it usually consumes more memory than the first method. If [`tidb_enable_tmp_storage_on_oom`](/system-variables.md#tidb_enable_tmp_storage_on_oom) is set to `ON`, TiDB might temporarily write the result to the hard disk. + +If the [`tidb_enable_lazy_cursor_fetch`](/system-variables.md#tidb_enable_lazy_cursor_fetch-new-in-v830) system variable is set to `ON`, TiDB tries to read part of the data only when the client fetches it, which uses less memory. For more details and limitations, read the [complete descriptions for the `tidb_enable_lazy_cursor_fetch` system variable](/system-variables.md#tidb_enable_lazy_cursor_fetch-new-in-v830). ### MySQL JDBC parameters diff --git a/develop/dev-guide-connection-parameters.md b/develop/dev-guide-connection-parameters.md index af208398374c1..da75d89cd63f2 100644 --- a/develop/dev-guide-connection-parameters.md +++ b/develop/dev-guide-connection-parameters.md @@ -143,17 +143,21 @@ For batch inserts, you can use the [`addBatch`/`executeBatch` API](https://www.t In most scenarios, to improve execution efficiency, JDBC obtains query results in advance and saves them in client memory by default. But when the query returns a super large result set, the client often wants the database server to reduce the number of records returned at a time and waits until the client's memory is ready and it requests for the next batch. -Usually, there are two kinds of processing methods in JDBC: +The following two processing methods are usually used in JDBC: -- [Set **FetchSize** to `Integer.MIN_VALUE`](https://dev.mysql.com/doc/connector-j/en/connector-j-reference-implementation-notes.html#ResultSet) to ensure that the client does not cache. The client will read the execution result from the network connection through `StreamingResult`. +- The first method: [Set **FetchSize** to `Integer.MIN_VALUE`](https://dev.mysql.com/doc/connector-j/en/connector-j-reference-implementation-notes.html#ResultSet) to ensure that the client does not cache. The client will read the execution result from the network connection through `StreamingResult`. When the client uses the streaming read method, it needs to finish reading or close `resultset` before continuing to use the statement to make a query. Otherwise, the error `No statements may be issued when any streaming result sets are open and in use on a given connection. Ensure that you have called .close() on any active streaming result sets before attempting more queries.` is returned. 
To avoid such an error in queries before the client finishes reading or closes `resultset`, you can add the `clobberStreamingResults=true` parameter in the URL. Then, `resultset` is automatically closed but the result set to be read in the previous streaming query is lost. -- To use Cursor Fetch, first [set `FetchSize`](http://makejavafaster.blogspot.com/2015/06/jdbc-fetch-size-performance.html) as a positive integer and configure `useCursorFetch=true` in the JDBC URL. +- The second method: Use Cursor Fetch by first [setting `FetchSize`](http://makejavafaster.blogspot.com/2015/06/jdbc-fetch-size-performance.html) as a positive integer and then configuring `useCursorFetch = true` in the JDBC URL. -TiDB supports both methods, but it is preferred that you use the first method, because it is a simpler implementation and has a better execution efficiency. +TiDB supports both methods, but it is recommended that you use the first method that sets `FetchSize` to `Integer.MIN_VALUE`, because it is a simpler implementation and has better execution efficiency. + +For the second method, TiDB first loads all data to the TiDB node, and then returns data to the client according to the `FetchSize`. Therefore, it usually consumes more memory than the first method. If [`tidb_enable_tmp_storage_on_oom`](/system-variables.md#tidb_enable_tmp_storage_on_oom) is set to `ON`, TiDB might temporarily write the result to the hard disk. + +If the [`tidb_enable_lazy_cursor_fetch`](/system-variables.md#tidb_enable_lazy_cursor_fetch-new-in-v830) system variable is set to `ON`, TiDB tries to read part of the data only when the client fetches it, which uses less memory. For more details and limitations, read the [complete descriptions for the `tidb_enable_lazy_cursor_fetch` system variable](/system-variables.md#tidb_enable_lazy_cursor_fetch-new-in-v830). ### MySQL JDBC parameters diff --git a/system-variables.md b/system-variables.md index 63ad87e2df4ef..e24e13a763671 100644 --- a/system-variables.md +++ b/system-variables.md @@ -2155,6 +2155,50 @@ mysql> SELECT job_info FROM mysql.analyze_jobs ORDER BY end_time DESC LIMIT 1; - Possible values: `OFF`, `ON` - This variable controls whether to support creating `Global indexes` for partitioned tables. `Global index` is currently in the development stage. **It is not recommended to modify the value of this system variable**. +### tidb_enable_lazy_cursor_fetch New in v8.3.0 + +> **Warning:** +> +> The feature controlled by this variable is experimental. It is not recommended that you use it in the production environment. This feature might be changed or removed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. + + + +- Scope: GLOBAL +- Persists to cluster: Yes +- Applies to hint [SET_VAR](/optimizer-hints.md#set_varvar_namevar_value): No +- Type: Boolean +- Default value: `OFF` +- Possible values: `OFF`, `ON` +- This variable controls the behavior of the [Cursor Fetch](/develop/dev-guide-connection-parameters.md#use-streamingresult-to-get-the-execution-result) feature. + - When Cursor Fetch is enabled and this variable is set to `OFF`, TiDB reads all the data at the start of statement execution, stores the data into TiDB's memory, and returns it to the client based on the client's specified `FetchSize` for subsequent client reads. If the result set is too large, TiDB might temporarily write the result to the hard disk. 
+ - When Cursor Fetch is enabled and this variable is set to `ON`, TiDB does not read all the data into the TiDB node at once, but reads data into the TiDB node incrementally as the client fetches it. +- The feature controlled by this variable has the following limitations: + - It does not support statements within explicit transactions. + - It only supports execution plans that contain and only contain `TableReader`, `IndexReader`, `IndexLookUp`, `Projection`, and `Selection` operators. + - For statements using Lazy Cursor Fetch, execution information does not appear in the [statements summary](/statement-summary-tables.md) and [slow query log](/identify-slow-queries.md). +- For unsupported scenarios, its behavior is the same as when setting this variable to `OFF`. + + + + + +- Scope: GLOBAL +- Persists to cluster: Yes +- Applies to hint [SET_VAR](/optimizer-hints.md#set_varvar_namevar_value): No +- Type: Boolean +- Default value: `OFF` +- Possible values: `OFF`, `ON` +- This variable controls the behavior of the [Cursor Fetch](/develop/dev-guide-connection-parameters.md#use-streamingresult-to-get-the-execution-result) feature. + - When Cursor Fetch is enabled and this variable is set to `OFF`, TiDB reads all the data at the start of statement execution, stores the data into TiDB's memory, and returns it to the client based on the client's specified `FetchSize` for subsequent client reads. If the result set is too large, TiDB might temporarily write the result to the hard disk. + - When Cursor Fetch is enabled and this variable is set to `ON`, TiDB does not read all the data into the TiDB node at once, but reads data into the TiDB node incrementally as the client fetches it. +- The feature controlled by this variable has the following limitations: + - It does not support statements within explicit transactions. + - It only supports execution plans that contain and only contain `TableReader`, `IndexReader`, `IndexLookUp`, `Projection`, and `Selection` operators. + - For statements using Lazy Cursor Fetch, execution information does not appear in the [statements summary](/statement-summary-tables.md) and [slow query log](https://docs.pingcap.com/tidb/stable/identify-slow-queries). +- For unsupported scenarios, its behavior is the same as when setting this variable to `OFF`. + + + ### tidb_enable_non_prepared_plan_cache - Scope: SESSION | GLOBAL From 2d19dec67a888a7b72c5c53dae95248a5bf3f9b5 Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Thu, 15 Aug 2024 15:02:47 +0800 Subject: [PATCH 18/44] br: make inc + log restore compatible (#18557) --- br/br-incremental-guide.md | 6 ++++++ br/br-pitr-manual.md | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/br/br-incremental-guide.md b/br/br-incremental-guide.md index 56f159b5ef102..e7b4815cf75f9 100644 --- a/br/br-incremental-guide.md +++ b/br/br-incremental-guide.md @@ -17,6 +17,12 @@ Because restoring the incremental backup relies on the snapshot of the database The incremental backup does not support batch renaming of tables. If batch renaming of tables occurs during the incremental backup process, the data restore might fail. It is recommended to perform a full backup after batch renaming tables, and use the latest full backup to replace the incremental data during restore. +Starting from v8.3.0, the `--allow-pitr-from-incremental` configuration parameter is introduced to control whether incremental backups and subsequent log backups are compatible. 
The default value is `true`, which means that incremental backups are compatible with subsequent log backups. + +- When you keep the default value `true`, the DDLs that need to be replayed are strictly checked before the incremental restore begins. This mode does not yet support `ADD INDEX`, `MODIFY COLUMN`, or `REORG PARTITION`. If you want to use incremental backups together with log backups, make sure that none of the preceding DDLs exist during the incremental backup process. Otherwise, these three DDLs cannot be replayed correctly. + +- If you want to use incremental restores without log backups during the whole recovery process, you can set `--allow-pitr-from-incremental` to `false` to skip the checks in the incremental recovery phase. + ## Back up incremental data To back up incremental data, run the `tiup br backup` command with **the last backup timestamp** `--lastbackupts` specified. In this way, br command-line tool automatically backs up incremental data generated between `lastbackupts` and the current time. To get `--lastbackupts`, run the `validate` command. The following is an example: diff --git a/br/br-pitr-manual.md b/br/br-pitr-manual.md index d4c9076ea1949..5a8e128d5d627 100644 --- a/br/br-pitr-manual.md +++ b/br/br-pitr-manual.md @@ -341,6 +341,10 @@ Expected output: ## Restore to a specified point in time (PITR) +> **Note:** +> +> If you specify `--full-backup-storage` as the incremental backup address for `restore point`, for restores of this backup and any previous incremental backups, you need to set the parameter `--allow-pitr-from-incremental` to `true` to make the incremental backups compatible with the subsequent log backups. + You can run the `tiup br restore point` command to perform a PITR on a new cluster or just restore the log backup data. Run `tiup br restore point --help` to see the help information: From e3250a35a652d3fb42cd2abe1c48e7de26213b41 Mon Sep 17 00:00:00 2001 From: Zhou Kunqin <25057648+time-and-fate@users.noreply.github.com> Date: Thu, 15 Aug 2024 15:41:40 +0800 Subject: [PATCH 19/44] *: add batch operation for creating/dropping binding by specifying digests (#18556) --- sql-plan-management.md | 26 +-- .../sql-statement-create-binding.md | 178 ++++++++++++++++-- sql-statements/sql-statement-drop-binding.md | 117 ++++++------ 3 files changed, 237 insertions(+), 84 deletions(-) diff --git a/sql-plan-management.md b/sql-plan-management.md index 956c283f91bf6..e7d349ec7827c 100644 --- a/sql-plan-management.md +++ b/sql-plan-management.md @@ -231,25 +231,25 @@ The original SQL statement and the bound statement must have the same text after #### Create a binding according to a historical execution plan -To make the execution plan of a SQL statement fixed to a historical execution plan, you can use `plan_digest` to bind that historical execution plan to the SQL statement, which is more convenient than binding it according to a SQL statement. +To make the execution plan of a SQL statement fixed to a historical execution plan, you can use Plan Digest to bind that historical execution plan to the SQL statement, which is more convenient than binding it according to a SQL statement. In addition, you can bind the execution plan for multiple SQL statements at once. For more details and examples, see [`CREATE [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-create-binding.md). When using this feature, note the following: - The feature generates hints according to historical execution plans and uses the generated hints for binding. 
Because historical execution plans are stored in [Statement Summary Tables](/statement-summary-tables.md), before using this feature, you need to enable the [`tidb_enable_stmt_summary`](/system-variables.md#tidb_enable_stmt_summary-new-in-v304) system variable first. - For TiFlash queries, Join queries with three or more tables, and queries that contain subqueries, the auto-generated hints are not adequate, which might result in the plan not being fully bound. In such cases, a warning will occur when creating a binding. -- If a historical execution plan is for a SQL statement with hints, the hints will be added to the binding. For example, after executing `SELECT /*+ max_execution_time(1000) */ * FROM t`, the binding created with its `plan_digest` will include `max_execution_time(1000)`. +- If a historical execution plan is for a SQL statement with hints, the hints will be added to the binding. For example, after executing `SELECT /*+ max_execution_time(1000) */ * FROM t`, the binding created with its Plan Digest will include `max_execution_time(1000)`. The SQL statement of this binding method is as follows: ```sql -CREATE [GLOBAL | SESSION] BINDING FROM HISTORY USING PLAN DIGEST 'plan_digest'; +CREATE [GLOBAL | SESSION] BINDING FROM HISTORY USING PLAN DIGEST StringLiteralOrUserVariableList; ``` -This statement binds an execution plan to a SQL statement using `plan_digest`. The default scope is SESSION. The applicable SQL statements, priorities, scopes, and effective conditions of the created bindings are the same as that of [bindings created according to SQL statements](#create-a-binding-according-to-a-sql-statement). +The preceding statement binds an execution plan to a SQL statement using Plan Digest. The default scope is SESSION. The applicable SQL statements, priorities, scopes, and effective conditions of the created bindings are the same as that of [bindings created according to SQL statements](#create-a-binding-according-to-a-sql-statement). -To use this binding method, you need to first get the `plan_digest` corresponding to the target historical execution plan in `statements_summary`, and then create a binding using the `plan_digest`. The detailed steps are as follows: +To use this binding method, you need to first get the Plan Digest corresponding to the target historical execution plan in `statements_summary`, and then create a binding using the Plan Digest. The detailed steps are as follows: -1. Get the `plan_digest` corresponding to the target execution plan in `statements_summary`. +1. Get the Plan Digest corresponding to the target execution plan in `statements_summary`. For example: @@ -274,9 +274,9 @@ To use this binding method, you need to first get the `plan_digest` correspondin BINARY_PLAN: 6QOYCuQDCg1UYWJsZVJlYWRlcl83Ev8BCgtTZWxlY3Rpb25fNhKOAQoPBSJQRnVsbFNjYW5fNSEBAAAAOA0/QSkAAQHwW4jDQDgCQAJKCwoJCgR0ZXN0EgF0Uh5rZWVwIG9yZGVyOmZhbHNlLCBzdGF0czpwc2V1ZG9qInRpa3ZfdGFzazp7dGltZTo1NjAuOMK1cywgbG9vcHM6MH1w////CQMEAXgJCBD///8BIQFzCDhVQw19BAAkBX0QUg9lcSgBfCAudC5hLCAxKWrmYQAYHOi0gc6hBB1hJAFAAVIQZGF0YTo9GgRaFAW4HDQuMDVtcywgCbYcMWKEAWNvcF8F2agge251bTogMSwgbWF4OiA1OTguNsK1cywgcHJvY19rZXlzOiAwLCBycGNfBSkAMgkMBVcQIDYwOS4pEPBDY29wcl9jYWNoZV9oaXRfcmF0aW86IDAuMDAsIGRpc3RzcWxfY29uY3VycmVuY3k6IDE1fXCwAXj///////////8BGAE= ``` - In this example, you can see that the execution plan corresponding to `plan_digest` is `4e3159169cc63c14b139a4e7d72eae1759875c9a9581f94bb2079aae961189cb`. 
+ In this example, you can see that the execution plan corresponding to Plan Digest is `4e3159169cc63c14b139a4e7d72eae1759875c9a9581f94bb2079aae961189cb`. -2. Use `plan_digest` to create a binding: +2. Use Plan Digest to create a binding: ```sql CREATE BINDING FROM HISTORY USING PLAN DIGEST '4e3159169cc63c14b139a4e7d72eae1759875c9a9581f94bb2079aae961189cb'; @@ -317,7 +317,7 @@ SELECT @@LAST_PLAN_FROM_BINDING; ### Remove a binding -You can remove a binding according to a SQL statement or `sql_digest`. +You can remove a binding according to a SQL statement or SQL Digest. #### Remove a binding according to a SQL statement @@ -343,15 +343,15 @@ explain SELECT * FROM t1,t2 WHERE t1.id = t2.id; In the example above, the dropped binding in the SESSION scope shields the corresponding binding in the GLOBAL scope. The optimizer does not add the `sm_join(t1, t2)` hint to the statement. The top node of the execution plan in the `explain` result is not fixed to MergeJoin by this hint. Instead, the top node is independently selected by the optimizer according to the cost estimation. -#### Remove a binding according to `sql_digest` +#### Remove a binding according to SQL Digest -In addition to removing a binding according to a SQL statement, you can also remove a binding according to `sql_digest`. +In addition to removing a binding according to a SQL statement, you can also remove a binding according to SQL Digest. For more details and examples, see [`DROP [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-drop-binding.md). ```sql -DROP [GLOBAL | SESSION] BINDING FOR SQL DIGEST 'sql_digest'; +DROP [GLOBAL | SESSION] BINDING FOR SQL DIGEST StringLiteralOrUserVariableList; ``` -This statement removes an execution plan binding corresponding to `sql_digest` at the GLOBAL or SESSION level. The default scope is SESSION. You can get the `sql_digest` by [viewing bindings](#view-bindings). +This statement removes an execution plan binding corresponding to SQL Digest at the GLOBAL or SESSION level. The default scope is SESSION. You can get the SQL Digest by [viewing bindings](#view-bindings). > **Note:** > diff --git a/sql-statements/sql-statement-create-binding.md b/sql-statements/sql-statement-create-binding.md index fcf3dc8ce70d8..7788679e68967 100644 --- a/sql-statements/sql-statement-create-binding.md +++ b/sql-statements/sql-statement-create-binding.md @@ -17,13 +17,19 @@ The bound SQL statement is parameterized and stored in the system table. When a ```ebnf+diagram CreateBindingStmt ::= 'CREATE' GlobalScope 'BINDING' ( 'FOR' BindableStmt 'USING' BindableStmt -| 'FROM' 'HISTORY' 'USING' 'PLAN' 'DIGEST' PlanDigest ) +| 'FROM' 'HISTORY' 'USING' 'PLAN' 'DIGEST' StringLiteralOrUserVariableList ) GlobalScope ::= ( 'GLOBAL' | 'SESSION' )? BindableStmt ::= ( SelectStmt | UpdateStmt | InsertIntoStmt | ReplaceIntoStmt | DeleteStmt ) + +StringLiteralOrUserVariableList ::= + ( StringLitOrUserVariable | StringLiteralOrUserVariableList ',' StringLitOrUserVariable ) + +StringLiteralOrUserVariable ::= + ( stringLiteral | UserVariable ) ``` **** @@ -32,6 +38,11 @@ BindableStmt ::= You can create a binding according to a SQL statement or a historical execution plan. +When you create a binding according to a historical execution plan, you need to specify the corresponding Plan Digest: + +- You can use either the string literal or user variable of the string type to specify the Plan Digest. +- You can specify multiple Plan Digests to create bindings for multiple statements at the same time. 
In this case, you can specify multiple strings, and include multiple digests in each string. Note that the strings or digests need to be separated by commas. + The following example shows how to create a binding according to a SQL statement. {{< copyable "sql" >}} @@ -139,34 +150,165 @@ mysql> EXPLAIN ANALYZE SELECT * FROM t1 WHERE b = 123; The following example shows how to create a binding according to a historical execution plan. ```sql -mysql> CREATE TABLE t(id INT PRIMARY KEY , a INT, KEY(a)); -Query OK, 0 rows affected (0.06 sec) +USE test; +CREATE TABLE t1(a INT, b INT, c INT, INDEX ia(a)); +CREATE TABLE t2(a INT, b INT, c INT, INDEX ia(a)); +INSERT INTO t1 SELECT * FROM t2 WHERE a = 1; +SELECT @@LAST_PLAN_FROM_BINDING; +UPDATE /*+ INL_JOIN(t2) */ t1, t2 SET t1.a = 1 WHERE t1.b = t2.a; +SELECT @@LAST_PLAN_FROM_BINDING; +DELETE /*+ HASH_JOIN(t1) */ t1 FROM t1 JOIN t2 WHERE t1.b = t2.a; +SELECT @@LAST_PLAN_FROM_BINDING; +SELECT * FROM t1 WHERE t1.a IN (SELECT a FROM t2); +SELECT @@LAST_PLAN_FROM_BINDING; +``` + +Method 1: + +```sql +SELECT query_sample_text, stmt_type, table_names, plan_digest FROM information_schema.statements_summary_history WHERE table_names LIKE '%test.t1%' AND stmt_type != 'CreateTable'; +CREATE GLOBAL BINDING FROM HISTORY USING PLAN DIGEST 'e72819cf99932f63a548156dbf433adda60e10337e89dcaa8638b4caf16f64d8,c291edc36b2482738d3389d335f37efc76290be2930330fe5034c5f4c42eeb36,8dc146249484f4a6ab219bfe9effa6b7a18aeed3764d49b610da61ac347ab914,73b2dec866595688ea416675f88ccb3456eb8e7443a79cd816695b688e07ac6b'; +``` + +Method 2: + +```sql +SELECT @digests:=GROUP_CONCAT(plan_digest) FROM information_schema.statements_summary_history WHERE table_names LIKE '%test.t1%' AND stmt_type != 'CreateTable'; +CREATE GLOBAL BINDING FROM HISTORY USING PLAN DIGEST @digests; +``` + +```sql +SHOW GLOBAL BINDINGS; +INSERT INTO t1 SELECT * FROM t2 WHERE a = 1; +SELECT @@LAST_PLAN_FROM_BINDING; +UPDATE t1, t2 SET t1.a = 1 WHERE t1.b = t2.a; +SELECT @@LAST_PLAN_FROM_BINDING; +DELETE t1 FROM t1 JOIN t2 WHERE t1.b = t2.a; +SELECT @@LAST_PLAN_FROM_BINDING; +SELECT * FROM t1 WHERE t1.a IN (SELECT a FROM t2); +SELECT @@LAST_PLAN_FROM_BINDING; +``` + +```sql +> CREATE TABLE t1(a INT, b INT, c INT, INDEX ia(a)); +Query OK, 0 rows affected (0.048 sec) + +> CREATE TABLE t2(a INT, b INT, c INT, INDEX ia(a)); +Query OK, 0 rows affected (0.035 sec) + +> INSERT INTO t1 SELECT * FROM t2 WHERE a = 1; +Query OK, 0 rows affected (0.002 sec) +Records: 0 Duplicates: 0 Warnings: 0 + +> SELECT @@LAST_PLAN_FROM_BINDING; ++--------------------------+ +| @@LAST_PLAN_FROM_BINDING | ++--------------------------+ +| 0 | ++--------------------------+ +1 row in set (0.001 sec) + +> UPDATE /*+ INL_JOIN(t2) */ t1, t2 SET t1.a = 1 WHERE t1.b = t2.a; +Query OK, 0 rows affected (0.005 sec) +Rows matched: 0 Changed: 0 Warnings: 0 + +> SELECT @@LAST_PLAN_FROM_BINDING; ++--------------------------+ +| @@LAST_PLAN_FROM_BINDING | ++--------------------------+ +| 0 | ++--------------------------+ +1 row in set (0.000 sec) -mysql> SELECT /*+ IGNORE_INDEX(t, a) */ * FROM t WHERE a = 1; -Empty set (0.01 sec) +> DELETE /*+ HASH_JOIN(t1) */ t1 FROM t1 JOIN t2 WHERE t1.b = t2.a; +Query OK, 0 rows affected (0.003 sec) -mysql> SELECT plan_digest FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY WHERE QUERY_SAMPLE_TEXT = 'SELECT /*+ IGNORE_INDEX(t, a) */ * FROM t WHERE a = 1'; -+------------------------------------------------------------------+ -| plan_digest | -+------------------------------------------------------------------+ -| 
4e3159169cc63c14b139a4e7d72eae1759875c9a9581f94bb2079aae961189cb | -+------------------------------------------------------------------+ -1 row in set (0.01 sec) +> SELECT @@LAST_PLAN_FROM_BINDING; ++--------------------------+ +| @@LAST_PLAN_FROM_BINDING | ++--------------------------+ +| 0 | ++--------------------------+ +1 row in set (0.000 sec) -mysql> CREATE BINDING FROM HISTORY USING PLAN DIGEST '4e3159169cc63c14b139a4e7d72eae1759875c9a9581f94bb2079aae961189cb'; -Query OK, 0 rows affected (0.02 sec) +> SELECT * FROM t1 WHERE t1.a IN (SELECT a FROM t2); +Empty set (0.002 sec) -mysql> SELECT * FROM t WHERE a = 1; -Empty set (0.01 sec) +> SELECT @@LAST_PLAN_FROM_BINDING; ++--------------------------+ +| @@LAST_PLAN_FROM_BINDING | ++--------------------------+ +| 0 | ++--------------------------+ +1 row in set (0.001 sec) + +> SELECT @digests:=GROUP_CONCAT(plan_digest) FROM information_schema.statements_summary_history WHERE table_names LIKE '%test.t1%' AND stmt_type != 'CreateTable'; ++---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| @digests:=GROUP_CONCAT(plan_digest) | ++---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| 73b2dec866595688ea416675f88ccb3456eb8e7443a79cd816695b688e07ac6b,8dc146249484f4a6ab219bfe9effa6b7a18aeed3764d49b610da61ac347ab914,c291edc36b2482738d3389d335f37efc76290be2930330fe5034c5f4c42eeb36,e72819cf99932f63a548156dbf433adda60e10337e89dcaa8638b4caf16f64d8 | ++---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +1 row in set (0.001 sec) + +> CREATE GLOBAL BINDING FROM HISTORY USING PLAN DIGEST @digests; +Query OK, 0 rows affected (0.060 sec) + +> SHOW GLOBAL BINDINGS; ++----------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------+---------+-------------------------+-------------------------+---------+-----------------+---------+------------------------------------------------------------------+------------------------------------------------------------------+ +| Original_sql | Bind_sql | Default_db | Status | Create_time | Update_time | Charset | Collation | Source | Sql_digest | Plan_digest | 
++----------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------+---------+-------------------------+-------------------------+---------+-----------------+---------+------------------------------------------------------------------+------------------------------------------------------------------+ +| insert into `test` . `t1` select * from `test` . `t2` where `a` = ? | INSERT INTO `test`.`t1` SELECT /*+ use_index(@`sel_1` `test`.`t2` `ia`) no_order_index(@`sel_1` `test`.`t2` `ia`)*/ * FROM `test`.`t2` WHERE `a` = 1 | test | enabled | 2024-08-11 05:27:19.669 | 2024-08-11 05:27:19.669 | utf8 | utf8_general_ci | history | bd23e6af17e7b77b25383e50e258f0dece18583d19772f08caacb2021945a300 | e72819cf99932f63a548156dbf433adda60e10337e89dcaa8638b4caf16f64d8 | +| update ( `test` . `t1` ) join `test` . `t2` set `t1` . `a` = ? where `t1` . `b` = `t2` . `a` | UPDATE /*+ inl_join(`test`.`t2`) use_index(@`upd_1` `test`.`t1` ) use_index(@`upd_1` `test`.`t2` `ia`) no_order_index(@`upd_1` `test`.`t2` `ia`)*/ (`test`.`t1`) JOIN `test`.`t2` SET `t1`.`a`=1 WHERE `t1`.`b` = `t2`.`a` | test | enabled | 2024-08-11 05:27:19.667 | 2024-08-11 05:27:19.667 | utf8 | utf8_general_ci | history | 987e91af17eb40e36fecfc0634cce0b6a736de02bb009091810f932804fc02e9 | c291edc36b2482738d3389d335f37efc76290be2930330fe5034c5f4c42eeb36 | +| delete `test` . `t1` from `test` . `t1` join `test` . `t2` where `t1` . `b` = `t2` . `a` | DELETE /*+ hash_join_build(`test`.`t2`) use_index(@`del_1` `test`.`t1` ) use_index(@`del_1` `test`.`t2` )*/ `test`.`t1` FROM `test`.`t1` JOIN `test`.`t2` WHERE `t1`.`b` = `t2`.`a` | test | enabled | 2024-08-11 05:27:19.664 | 2024-08-11 05:27:19.664 | utf8 | utf8_general_ci | history | 70ef3d442d95c51020a76c7c86a3ab674258606d4dd24bbd16ac6f69d87a4316 | 8dc146249484f4a6ab219bfe9effa6b7a18aeed3764d49b610da61ac347ab914 | +| select * from `test` . `t1` where `t1` . `a` in ( select `a` from `test` . 
`t2` ) | SELECT /*+ use_index(@`sel_1` `test`.`t1` ) stream_agg(@`sel_2`) use_index(@`sel_2` `test`.`t2` `ia`) order_index(@`sel_2` `test`.`t2` `ia`) agg_to_cop(@`sel_2`)*/ * FROM `test`.`t1` WHERE `t1`.`a` IN (SELECT `a` FROM `test`.`t2`) | test | enabled | 2024-08-11 05:27:19.649 | 2024-08-11 05:27:19.649 | utf8 | utf8_general_ci | history | b58508a5e29d7889adf98cad50343d7a575fd32ad55dbdaa88e14ecde54f3d93 | 73b2dec866595688ea416675f88ccb3456eb8e7443a79cd816695b688e07ac6b | ++----------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------+---------+-------------------------+-------------------------+---------+-----------------+---------+------------------------------------------------------------------+------------------------------------------------------------------+ +4 rows in set (0.001 sec) + +> INSERT INTO t1 SELECT * FROM t2 WHERE a = 1; +Query OK, 0 rows affected (0.002 sec) +Records: 0 Duplicates: 0 Warnings: 0 + +> SELECT @@LAST_PLAN_FROM_BINDING; ++--------------------------+ +| @@LAST_PLAN_FROM_BINDING | ++--------------------------+ +| 1 | ++--------------------------+ +1 row in set (0.000 sec) -mysql> SELECT @@LAST_PLAN_FROM_BINDING; +> UPDATE t1, t2 SET t1.a = 1 WHERE t1.b = t2.a; +Query OK, 0 rows affected (0.002 sec) +Rows matched: 0 Changed: 0 Warnings: 0 + +> SELECT @@LAST_PLAN_FROM_BINDING; ++--------------------------+ +| @@LAST_PLAN_FROM_BINDING | ++--------------------------+ +| 1 | ++--------------------------+ +1 row in set (0.000 sec) + +> DELETE t1 FROM t1 JOIN t2 WHERE t1.b = t2.a; +Query OK, 0 rows affected (0.002 sec) + +> SELECT @@LAST_PLAN_FROM_BINDING; +--------------------------+ | @@LAST_PLAN_FROM_BINDING | +--------------------------+ | 1 | +--------------------------+ -1 row in set (0.01 sec) +1 row in set (0.000 sec) + +> SELECT * FROM t1 WHERE t1.a IN (SELECT a FROM t2); +Empty set (0.002 sec) +> SELECT @@LAST_PLAN_FROM_BINDING; ++--------------------------+ +| @@LAST_PLAN_FROM_BINDING | ++--------------------------+ +| 1 | ++--------------------------+ +1 row in set (0.002 sec) ``` ## MySQL compatibility diff --git a/sql-statements/sql-statement-drop-binding.md b/sql-statements/sql-statement-drop-binding.md index 6413ccf5fc4d2..25ddb098a6cdf 100644 --- a/sql-statements/sql-statement-drop-binding.md +++ b/sql-statements/sql-statement-drop-binding.md @@ -15,18 +15,29 @@ A `BINDING` can be on either a `GLOBAL` or `SESSION` basis. The default is `SESS ```ebnf+diagram DropBindingStmt ::= 'DROP' GlobalScope 'BINDING' 'FOR' ( BindableStmt ( 'USING' BindableStmt )? -| 'SQL' 'DIGEST' SqlDigest) +| 'SQL' 'DIGEST' StringLiteralOrUserVariableList ) GlobalScope ::= ( 'GLOBAL' | 'SESSION' )? BindableStmt ::= ( SelectStmt | UpdateStmt | InsertIntoStmt | ReplaceIntoStmt | DeleteStmt ) + +StringLiteralOrUserVariableList ::= + ( StringLitOrUserVariable | StringLiteralOrUserVariableList ',' StringLitOrUserVariable ) + +StringLiteralOrUserVariable ::= + ( stringLiteral | UserVariable ) ``` ## Examples -You can remove a binding according to a SQL statement or `sql_digest`. +You can remove a binding according to a SQL statement or SQL Digest. 
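
For a quick sketch of the first method before the detailed examples below, assuming a global binding already exists for the query `SELECT * FROM t1 WHERE a > 1` (the same binding that is created later in this section), you can drop it by providing the original statement text:

```sql
-- Drop the binding by its original statement text.
-- TiDB normalizes the statement and matches it against the stored binding.
DROP GLOBAL BINDING FOR SELECT * FROM t1 WHERE a > 1;
```

After the binding is dropped, the optimizer chooses the execution plan for this query on its own again.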
+ +When you remove a binding according to SQL Digest, you need to specify the corresponding SQL Digest: + +- You can use either the string literal or user variable of the string type to specify the Plan Digest. +- You can specify multiple string values, and include multiple digests in each string. Note that the strings or digests need to be separated by commas. The following example shows how to remove a binding according to a SQL statement. @@ -135,63 +146,63 @@ mysql> SHOW SESSION BINDINGS\G Empty set (0.00 sec) ``` -The following example shows how to remove a binding according to `sql_digest`. +The following example shows how to remove a binding according to SQL Digest. ```sql -mysql> CREATE TABLE t(id INT PRIMARY KEY , a INT, KEY(a)); -Query OK, 0 rows affected (0.06 sec) - -mysql> SELECT /*+ IGNORE_INDEX(t, a) */ * FROM t WHERE a = 1; -Empty set (0.01 sec) - -mysql> SELECT plan_digest FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY WHERE QUERY_SAMPLE_TEXT = 'SELECT /*+ IGNORE_INDEX(t, a) */ * FROM t WHERE a = 1'; -+------------------------------------------------------------------+ -| plan_digest | -+------------------------------------------------------------------+ -| 4e3159169cc63c14b139a4e7d72eae1759875c9a9581f94bb2079aae961189cb | -+------------------------------------------------------------------+ -1 row in set (0.01 sec) - -mysql> CREATE BINDING FROM HISTORY USING PLAN DIGEST '4e3159169cc63c14b139a4e7d72eae1759875c9a9581f94bb2079aae961189cb'; -Query OK, 0 rows affected (0.02 sec) - -mysql> SELECT * FROM t WHERE a = 1; -Empty set (0.01 sec) - -mysql> SELECT @@LAST_PLAN_FROM_BINDING; -+--------------------------+ -| @@LAST_PLAN_FROM_BINDING | -+--------------------------+ -| 1 | -+--------------------------+ -1 row in set (0.01 sec) - -mysql> SHOW BINDINGS\G -*************************** 1. row *************************** -Original_sql: select * from `test` . `t` where `a` = ? 
- Bind_sql: SELECT /*+ use_index(@`sel_1` `test`.`t` ) ignore_index(`t` `a`)*/ * FROM `test`.`t` WHERE `a` = 1 - Default_db: test - Status: enabled - Create_time: 2022-12-14 15:26:22.277 - Update_time: 2022-12-14 15:26:22.277 - Charset: utf8mb4 - Collation: utf8mb4_general_ci - Source: history - Sql_digest: 6909a1bbce5f64ade0a532d7058dd77b6ad5d5068aee22a531304280de48349f - Plan_digest: 4e3159169cc63c14b139a4e7d72eae1759875c9a9581f94bb2079aae961189cb -1 row in set (0.02 sec) +CREATE TABLE t1(a INT, b INT, c INT, INDEX ia(a)); +CREATE TABLE t2(a INT, b INT, c INT, INDEX ia(a)); +CREATE GLOBAL BINDING FOR SELECT * FROM t1 WHERE a > 1 USING SELECT * FROM t1 USE INDEX (ia) WHERE a > 1; +CREATE GLOBAL BINDING FOR SELECT * FROM t2 WHERE a < 1 USING SELECT * FROM t2 USE INDEX (ia) WHERE a < 1; +CREATE GLOBAL BINDING FOR SELECT * FROM t1 JOIN t2 ON t1.b = t2.a USING SELECT /*+ HASH_JOIN(t1) */ * FROM t1 JOIN t2 ON t1.b = t2.a; +SHOW GLOBAL BINDINGS; +``` -ERROR: -No query specified +Method 1: -mysql> DROP BINDING FOR SQL DIGEST '6909a1bbce5f64ade0a532d7058dd77b6ad5d5068aee22a531304280de48349f'; -Query OK, 0 rows affected (0.00 sec) +```sql +DROP GLOBAL BINDING FOR SQL DIGEST '31026623c8f22264fe0dfc26f29c69c5c457d6b85960c578ebcf17a967ed7893', '0f38b2e769927ae37981c66f0988c6299b602e03f029e38aa071e656fc321593', '3c8dfc451b0e36afd904cefca5137e68fb051f02964e1958ed60afdadc25f57e'; +SHOW GLOBAL BINDINGS; +``` + +Method 2: + +```sql +SET @digests='31026623c8f22264fe0dfc26f29c69c5c457d6b85960c578ebcf17a967ed7893, 0f38b2e769927ae37981c66f0988c6299b602e03f029e38aa071e656fc321593, 3c8dfc451b0e36afd904cefca5137e68fb051f02964e1958ed60afdadc25f57e'; +DROP GLOBAL BINDING FOR SQL DIGEST @digests; +SHOW GLOBAL BINDINGS; +``` + +```sql +> CREATE TABLE t1(a INT, b INT, c INT, INDEX ia(a)); +Query OK, 0 rows affected (0.044 sec) + +> CREATE TABLE t2(a INT, b INT, c INT, INDEX ia(a)); +Query OK, 0 rows affected (0.035 sec) + +> CREATE GLOBAL BINDING FOR SELECT * FROM t1 WHERE a > 1 USING SELECT * FROM t1 USE INDEX (ia) WHERE a > 1; +Query OK, 0 rows affected (0.011 sec) + +> CREATE GLOBAL BINDING FOR SELECT * FROM t2 WHERE a < 1 USING SELECT * FROM t2 USE INDEX (ia) WHERE a < 1; +Query OK, 0 rows affected (0.013 sec) + +> CREATE GLOBAL BINDING FOR SELECT * FROM t1 JOIN t2 ON t1.b = t2.a USING SELECT /*+ HASH_JOIN(t1) */ * FROM t1 JOIN t2 ON t1.b = t2.a; +Query OK, 0 rows affected (0.012 sec) + +> SHOW GLOBAL BINDINGS; ++---------------------------------------------------------------------------+-----------------------------------------------------------------------------------------+------------+---------+-------------------------+-------------------------+---------+-----------------+--------+------------------------------------------------------------------+-------------+ +| Original_sql | Bind_sql | Default_db | Status | Create_time | Update_time | Charset | Collation | Source | Sql_digest | Plan_digest | ++---------------------------------------------------------------------------+-----------------------------------------------------------------------------------------+------------+---------+-------------------------+-------------------------+---------+-----------------+--------+------------------------------------------------------------------+-------------+ +| select * from `test` . `t1` join `test` . `t2` on `t1` . `b` = `t2` . 
`a` | SELECT /*+ HASH_JOIN(`t1`)*/ * FROM `test`.`t1` JOIN `test`.`t2` ON `t1`.`b` = `t2`.`a` | test | enabled | 2024-08-11 04:06:49.953 | 2024-08-11 04:06:49.953 | utf8 | utf8_general_ci | manual | 31026623c8f22264fe0dfc26f29c69c5c457d6b85960c578ebcf17a967ed7893 | | +| select * from `test` . `t2` where `a` < ? | SELECT * FROM `test`.`t2` USE INDEX (`ia`) WHERE `a` < 1 | test | enabled | 2024-08-11 04:06:49.937 | 2024-08-11 04:06:49.937 | utf8 | utf8_general_ci | manual | 0f38b2e769927ae37981c66f0988c6299b602e03f029e38aa071e656fc321593 | | +| select * from `test` . `t1` where `a` > ? | SELECT * FROM `test`.`t1` USE INDEX (`ia`) WHERE `a` > 1 | test | enabled | 2024-08-11 04:06:49.924 | 2024-08-11 04:06:49.924 | utf8 | utf8_general_ci | manual | 3c8dfc451b0e36afd904cefca5137e68fb051f02964e1958ed60afdadc25f57e | | ++---------------------------------------------------------------------------+-----------------------------------------------------------------------------------------+------------+---------+-------------------------+-------------------------+---------+-----------------+--------+------------------------------------------------------------------+-------------+ +3 rows in set (0.001 sec) -mysql> SHOW BINDINGS\G -Empty set (0.01 sec) +> DROP GLOBAL BINDING FOR SQL DIGEST '31026623c8f22264fe0dfc26f29c69c5c457d6b85960c578ebcf17a967ed7893', '0f38b2e769927ae37981c66f0988c6299b602e03f029e38aa071e656fc321593', '3c8dfc451b0e36afd904cefca5137e68fb051f02964e1958ed60afdadc25f57e'; +Query OK, 3 rows affected (0.019 sec) -ERROR: -No query specified +> SHOW GLOBAL BINDINGS; +Empty set (0.002 sec) ``` ## MySQL compatibility From dfd8d4303a5300ab5bcb05eac4f3195945d575d0 Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Fri, 16 Aug 2024 10:47:41 +0800 Subject: [PATCH 20/44] bdr ga (#18606) --- tidb-binlog/bidirectional-replication-between-tidb-clusters.md | 1 - 1 file changed, 1 deletion(-) diff --git a/tidb-binlog/bidirectional-replication-between-tidb-clusters.md b/tidb-binlog/bidirectional-replication-between-tidb-clusters.md index d8b3d37be622f..41988552a6f0f 100644 --- a/tidb-binlog/bidirectional-replication-between-tidb-clusters.md +++ b/tidb-binlog/bidirectional-replication-between-tidb-clusters.md @@ -8,7 +8,6 @@ aliases: ['/docs/dev/tidb-binlog/bidirectional-replication-between-tidb-clusters > **Warning:** > -> - Currently, bidirectional replication is still an experimental feature. It is **NOT** recommended to use it in the production environment. > - TiDB Binlog is not compatible with some features introduced in TiDB v5.0 and they cannot be used together. For details, see [Notes](/tidb-binlog/tidb-binlog-overview.md#notes). > - Starting from TiDB v7.5.0, technical support for the data replication feature of TiDB Binlog is no longer provided. It is strongly recommended to use [TiCDC](/ticdc/ticdc-overview.md) as an alternative solution for data replication. > - Although TiDB v7.5.0 still supports the real-time backup and restoration feature of TiDB Binlog, this component will be completely deprecated in future versions. It is recommended to use [PITR](/br/br-pitr-guide.md) as an alternative solution for data recovery. 
From df075263c6b9400dd21c775e258784e7e19bdf4f Mon Sep 17 00:00:00 2001 From: Grace Cai Date: Fri, 16 Aug 2024 15:56:41 +0800 Subject: [PATCH 21/44] move issue #36004 to v6.5.7 release notes (#18607) --- releases/release-6.5.7.md | 2 +- releases/release-6.5.8.md | 1 - releases/release-6.5.9.md | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/releases/release-6.5.7.md b/releases/release-6.5.7.md index 5d63eb623d118..2bbb8f59e2bac 100644 --- a/releases/release-6.5.7.md +++ b/releases/release-6.5.7.md @@ -44,7 +44,7 @@ Quick access: [Quick start](https://docs.pingcap.com/tidb/v6.5/quick-start-with- + TiDB - - Fix the issue that `stats_meta` is not created following table creation [#38189](https://github.com/pingcap/tidb/issues/38189) @[xuyifangreeneyes](https://github.com/xuyifangreeneyes) + - Fix the issue that TiDB might not simultaneously create the new statistics metadata when a large number of `CREATE TABLE` statements are executed in a short period of time, causing subsequent query estimation to fail to get accurate row count information [#36004](https://github.com/pingcap/tidb/issues/36004) [#38189](https://github.com/pingcap/tidb/issues/38189) @[xuyifangreeneyes](https://github.com/xuyifangreeneyes) - Fix the issue that TiDB server might consume a significant amount of resources when the enterprise plugin for audit logging is used [#49273](https://github.com/pingcap/tidb/issues/49273) @[lcwangchao](https://github.com/lcwangchao) - Fix the incorrect error message for `ErrLoadDataInvalidURI` (invalid S3 URI error) [#48164](https://github.com/pingcap/tidb/issues/48164) @[lance6716](https://github.com/lance6716) - Fix the issue that high CPU usage of TiDB occurs due to long-term memory pressure caused by `tidb_server_memory_limit` [#48741](https://github.com/pingcap/tidb/issues/48741) @[XuHuaiyu](https://github.com/XuHuaiyu) diff --git a/releases/release-6.5.8.md b/releases/release-6.5.8.md index 742b7d4c8c46e..872f0d5b79372 100644 --- a/releases/release-6.5.8.md +++ b/releases/release-6.5.8.md @@ -48,7 +48,6 @@ Quick access: [Quick start](https://docs.pingcap.com/tidb/v6.5/quick-start-with- - Fix the issue that executing `SELECT INTO OUTFILE` using the `PREPARE` method incorrectly returns a success message instead of an error [#49166](https://github.com/pingcap/tidb/issues/49166) @[qw4990](https://github.com/qw4990) - Fix the issue that executing `UNIQUE` index lookup with an `ORDER BY` clause might cause an error [#49920](https://github.com/pingcap/tidb/issues/49920) @[jackysp](https://github.com/jackysp) - Fix the issue that the `DELETE` and `UPDATE` statements using index lookup might report an error when `tidb_multi_statement_mode` mode is enabled [#50012](https://github.com/pingcap/tidb/issues/50012) @[tangenta](https://github.com/tangenta) - - Fix the issue that TiDB might not simultaneously establish the new statistics metadata when executing a large number of `CREATE TABLE` statements in a short period of time [#36004](https://github.com/pingcap/tidb/issues/36004) @[xuyifangreeneyes](https://github.com/xuyifangreeneyes) - Fix the issue that the `LEADING` hint does not take effect in `UNION ALL` statements [#50067](https://github.com/pingcap/tidb/issues/50067) @[hawkingrei](https://github.com/hawkingrei) - Fix the issue that using old interfaces might cause inconsistent metadata for tables [#49751](https://github.com/pingcap/tidb/issues/49751) @[hawkingrei](https://github.com/hawkingrei) - Fix the issue that common 
hints do not take effect in `UNION ALL` statements [#50068](https://github.com/pingcap/tidb/issues/50068) @[hawkingrei](https://github.com/hawkingrei) diff --git a/releases/release-6.5.9.md b/releases/release-6.5.9.md index d4699e224d41b..6e94ff6e38c29 100644 --- a/releases/release-6.5.9.md +++ b/releases/release-6.5.9.md @@ -47,7 +47,6 @@ Quick access: [Quick start](https://docs.pingcap.com/tidb/v6.5/quick-start-with- + TiDB - - Fix the issue that after a large number of tables are created, the newly created tables might lack the `stats_meta` information, causing subsequent query estimation to fail to get accurate row count information [#36004](https://github.com/pingcap/tidb/issues/36004) @[xuyifangreeneyes](https://github.com/xuyifangreeneyes) - Fix the issue that dropped tables are still counted by the Grafana `Stats Healthy Distribution` panel [#39349](https://github.com/pingcap/tidb/issues/39349) @[xuyifangreeneyes](https://github.com/xuyifangreeneyes) - Fix the issue that TiDB does not handle the `WHERE ` filtering condition in a SQL statement when the query of that statement involves the `MemTableScan` operator [#40937](https://github.com/pingcap/tidb/issues/40937) @[zhongzc](https://github.com/zhongzc) - Fix the issue that query results might be incorrect when the `HAVING` clause in a subquery contains correlated columns [#51107](https://github.com/pingcap/tidb/issues/51107) @[hawkingrei](https://github.com/hawkingrei) From 46bc6b59e547efdc313b3784818963056641a104 Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Mon, 19 Aug 2024 07:54:13 +0200 Subject: [PATCH 22/44] partition: add doc for global index, also with GLOBAL IndexOption/ColumnOption (#18543) --- partitioned-table.md | 107 ++++++++++++++++++- placement-rules-in-sql.md | 19 ++-- sql-statements/sql-statement-add-column.md | 5 +- sql-statements/sql-statement-add-index.md | 3 + sql-statements/sql-statement-create-index.md | 2 + sql-statements/sql-statement-create-table.md | 6 +- system-variables.md | 6 +- 7 files changed, 134 insertions(+), 14 deletions(-) diff --git a/partitioned-table.md b/partitioned-table.md index 7bfe1e7849564..e3140b633ba3a 100644 --- a/partitioned-table.md +++ b/partitioned-table.md @@ -1476,7 +1476,11 @@ This section introduces some restrictions and limitations on partitioned tables ### Partitioning keys, primary keys and unique keys -This section discusses the relationship of partitioning keys with primary keys and unique keys. The rule governing this relationship can be expressed as follows: **Every unique key on the table must use every column in the table's partitioning expression**. This also includes the table's primary key, because it is by definition a unique key. +This section discusses the relationship of partitioning keys with primary keys and unique keys. The rule governing this relationship can be expressed as follows: **Every unique key on the table must use every column in the table's partitioning expression**. This also includes the table's primary key, because it is by definition a unique key. + +> **Note:** +> +> This rule only applies to the scenarios where the [`tidb_enable_global_index`](/system-variables.md#tidb_enable_global_index-new-in-v760) system variable is not enabled. When it is enabled, unique keys in partitioned tables are not required to include all the columns used in the partition expressions. For more information, see [global indexes](#global-indexes). 
For example, the following table creation statements are invalid: @@ -1685,6 +1689,107 @@ CREATE TABLE t (a varchar(20), b blob, ERROR 1503 (HY000): A UNIQUE INDEX must include all columns in the table's partitioning function ``` +#### Global indexes + +Before the introduction of global indexes, TiDB created a local index for each partition, leading to [a limitation](#partitioning-keys-primary-keys-and-unique-keys) that primary keys and unique keys had to include the partition key to ensure data uniqueness. Additionally, when querying data across multiple partitions, TiDB needed to scan the data of each partition to return results. + +To address these issues, TiDB introduces the global indexes feature in v8.3.0. A global index covers the data of the entire table with a single index, allowing primary keys and unique keys to maintain global uniqueness without including all partition keys. Moreover, global indexes can access data across multiple partitions in a single operation, significantly improving query performance for non-partitioned keys. + +> **Warning:** +> +> The global indexes feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed or removed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. + +To create a global index for a primary key or unique key that **does not include all the columns used in the partition expressions**, you can enable the [`tidb_enable_global_index`](/system-variables.md#tidb_enable_global_index-new-in-v760) system variable and add the `GLOBAL` keyword in the index definition. + +> **Note:** +> +> Global indexes affect partition management. `DROP`, `TRUNCATE`, and `REORGANIZE PARTITION` operations also trigger updates to table-level global indexes, meaning that these DDL operations will only return results after the global indexes of the corresponding tables are fully updated. + +```sql +SET tidb_enable_global_index = ON; + +CREATE TABLE t1 ( + col1 INT NOT NULL, + col2 DATE NOT NULL, + col3 INT NOT NULL, + col4 INT NOT NULL, + UNIQUE KEY uidx12(col1, col2) GLOBAL, + UNIQUE KEY uidx3(col3) +) +PARTITION BY HASH(col3) +PARTITIONS 4; +``` + +In the preceding example, the unique index `uidx12` is a global index, while `uidx3` is a regular unique index. + +Note that a **clustered index** cannot be a global index, as shown in the following example: + +```sql +SET tidb_enable_global_index = ON; + +CREATE TABLE t2 ( + col1 INT NOT NULL, + col2 DATE NOT NULL, + PRIMARY KEY (col2) CLUSTERED GLOBAL +) PARTITION BY HASH(col1) PARTITIONS 5; +``` + +``` +ERROR 1503 (HY000): A CLUSTERED INDEX must include all columns in the table's partitioning function +``` + +The reason is that if the clustered index is a global index, the table will no longer be partitioned. This is because the key of the clustered index is also the record key at the partition level, but the global index is at the table level, which causes a conflict. If you need to set the primary key as a global index, you must explicitly define it as a non-clustered index, for example, `PRIMARY KEY(col1, col2) NONCLUSTERED GLOBAL`. + +You can identify a global index by the `GLOBAL` index option in the [`SHOW CREATE TABLE`](/sql-statements/sql-statement-show-create-table.md) output. 
+ +```sql +SHOW CREATE TABLE t1\G +``` + +``` + Table: t1 +Create Table: CREATE TABLE `t1` ( + `col1` int(11) NOT NULL, + `col2` date NOT NULL, + `col3` int(11) NOT NULL, + `col4` int(11) NOT NULL, + UNIQUE KEY `uidx12` (`col1`,`col2`) /*T![global_index] GLOBAL */, + UNIQUE KEY `uidx3` (`col3`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY HASH (`col3`) PARTITIONS 4 +1 row in set (0.00 sec) +``` + +Alternatively, you can query the [`INFORMATION_SCHEMA.TIDB_INDEXES`](/information-schema/information-schema-tidb-indexes.md) table and check the `IS_GLOBAL` column in the output. + +```sql +SELECT * FROM INFORMATION_SCHEMA.TIDB_INDEXES WHERE table_name='t1'; +``` + +``` ++--------------+------------+------------+----------+--------------+-------------+----------+---------------+------------+----------+------------+-----------+-----------+ +| TABLE_SCHEMA | TABLE_NAME | NON_UNIQUE | KEY_NAME | SEQ_IN_INDEX | COLUMN_NAME | SUB_PART | INDEX_COMMENT | Expression | INDEX_ID | IS_VISIBLE | CLUSTERED | IS_GLOBAL | ++--------------+------------+------------+----------+--------------+-------------+----------+---------------+------------+----------+------------+-----------+-----------+ +| test | t1 | 0 | uidx12 | 1 | col1 | NULL | | NULL | 1 | YES | NO | 1 | +| test | t1 | 0 | uidx12 | 2 | col2 | NULL | | NULL | 1 | YES | NO | 1 | +| test | t1 | 0 | uidx3 | 1 | col3 | NULL | | NULL | 2 | YES | NO | 0 | ++--------------+------------+------------+----------+--------------+-------------+----------+---------------+------------+----------+------------+-----------+-----------+ +3 rows in set (0.00 sec) +``` + +When partitioning a non-partitioned table or repartitioning an already partitioned table, you can update the indexes to be global indexes or revert them to local indexes as needed: + +```sql +ALTER TABLE t1 PARTITION BY HASH (col1) PARTITIONS 3 UPDATE INDEXES (uidx12 LOCAL, uidx3 GLOBAL); +``` + +##### Limitations of global indexes + +- If the `GLOBAL` keyword is not explicitly specified in the index definition, TiDB creates a local index by default. +- The `GLOBAL` and `LOCAL` keywords only apply to partitioned tables and do not affect non-partitioned tables. In other words, there is no difference between a global index and a local index in non-partitioned tables. +- DDL operations such as `ADD PARTITION`, `DROP PARTITION`, `TRUNCATE PARTITION`, `REORGANIZE PARTITION`, `SPLIT PARTITION`, and `EXCHANGE PARTITION` also trigger updates to global indexes. The results of these DDL operations will only be returned after the global indexes of the corresponding tables are fully updated. This can delay operations that usually require quick DDL completion, such as data archiving operations (`EXCHANGE PARTITION`, `TRUNCATE PARTITION`, and `DROP PARTITION`). In contrast, when global indexes are not involved, these DDL operations can be completed immediately. +- By default, the primary key of a partitioned table is a clustered index and must include the partition key. If you require the primary key to exclude the partition key, you can explicitly specify the primary key as a non-clustered global index when creating the table, for example, `PRIMARY KEY(col1, col2) NONCLUSTERED GLOBAL`. 
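+
+As a sketch of the last limitation above (the table name `t3` and its columns are chosen only for illustration, reusing the layout of the earlier `t2` example), the following statement is expected to succeed because the primary key is explicitly declared as a non-clustered global index:
+
+```sql
+SET tidb_enable_global_index = ON;
+
+CREATE TABLE t3 (
+    col1 INT NOT NULL,
+    col2 DATE NOT NULL,
+    -- The primary key does not include the partition column col1,
+    -- so it must be declared as a non-clustered global index.
+    PRIMARY KEY (col2) NONCLUSTERED GLOBAL
+) PARTITION BY HASH(col1) PARTITIONS 5;
+```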
+ ### Partitioning limitations relating to functions Only the functions shown in the following list are allowed in partitioning expressions: diff --git a/placement-rules-in-sql.md b/placement-rules-in-sql.md index e100777c40e66..01b6c2540b7f3 100644 --- a/placement-rules-in-sql.md +++ b/placement-rules-in-sql.md @@ -297,14 +297,16 @@ ALTER TABLE t PLACEMENT POLICY=default; -- Removes the placement policy 'five_re You can also specify a placement policy for a partitioned table or a partition. For example: ```sql -CREATE PLACEMENT POLICY storageforhisotrydata CONSTRAINTS="[+node=history]"; +CREATE PLACEMENT POLICY storageforhistorydata CONSTRAINTS="[+node=history]"; CREATE PLACEMENT POLICY storagefornewdata CONSTRAINTS="[+node=new]"; CREATE PLACEMENT POLICY companystandardpolicy CONSTRAINTS=""; -CREATE TABLE t1 (id INT, name VARCHAR(50), purchased DATE) +SET tidb_enable_global_index = ON; + +CREATE TABLE t1 (id INT, name VARCHAR(50), purchased DATE, UNIQUE INDEX idx(id) GLOBAL) PLACEMENT POLICY=companystandardpolicy PARTITION BY RANGE( YEAR(purchased) ) ( - PARTITION p0 VALUES LESS THAN (2000) PLACEMENT POLICY=storageforhisotrydata, + PARTITION p0 VALUES LESS THAN (2000) PLACEMENT POLICY=storageforhistorydata, PARTITION p1 VALUES LESS THAN (2005), PARTITION p2 VALUES LESS THAN (2010), PARTITION p3 VALUES LESS THAN (2015), @@ -312,17 +314,18 @@ PARTITION BY RANGE( YEAR(purchased) ) ( ); ``` -If no placement policy is specified for a partition in a table, the partition attempts to inherit the policy (if any) from the table. In the preceding example: +If no placement policy is specified for a partition in a table, the partition attempts to inherit the policy (if any) from the table. If the table has a [global index](/partitioned-table.md#global-indexes), the index will apply the same placement policy as the table. In the preceding example: -- The `p0` partition will apply the `storageforhisotrydata` policy. +- The `p0` partition will apply the `storageforhistorydata` policy. - The `p4` partition will apply the `storagefornewdata` policy. - The `p1`, `p2`, and `p3` partitions will apply the `companystandardpolicy` placement policy inherited from the table `t1`. -- If no placement policy is specified for the table `t1`, the `p1`, `p2`, and `p3` partitions will inherit the database default policy or the global default policy. +- The global index `idx` will apply the same `companystandardpolicy` placement policy as the table `t1`. +- If no placement policy is specified for the table `t1`, then the `p1`, `p2`, and `p3` partitions and the global index `idx` will inherit the database default policy or the global default policy. 
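+
+To confirm which policy each partition ultimately uses, you can query the effective placement. The following is a sketch based on the preceding example and the `SHOW PLACEMENT FOR` statement; the partition names come from the `t1` definition:
+
+```sql
+-- Shows the placement policy applied to partition p0 (storageforhistorydata).
+SHOW PLACEMENT FOR TABLE t1 PARTITION p0;
+
+-- Shows the placement policy inherited from the table (companystandardpolicy).
+SHOW PLACEMENT FOR TABLE t1 PARTITION p1;
+```
+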
After placement policies are attached to these partitions, you can change the placement policy for a specific partition as in the following example: ```sql -ALTER TABLE t1 PARTITION p1 PLACEMENT POLICY=storageforhisotrydata; +ALTER TABLE t1 PARTITION p1 PLACEMENT POLICY=storageforhistorydata; ``` ## High availability examples @@ -479,4 +482,4 @@ After executing the statements in the example, TiDB will place the `app_order` d | TiDB Lightning | Not compatible yet | An error is reported when TiDB Lightning imports backup data that contains placement policies | | TiCDC | 6.0 | Ignores placement policies, and does not replicate the policies to the downstream | - \ No newline at end of file + diff --git a/sql-statements/sql-statement-add-column.md b/sql-statements/sql-statement-add-column.md index 47ae0faa329e1..2c67d1fc2cea0 100644 --- a/sql-statements/sql-statement-add-column.md +++ b/sql-statements/sql-statement-add-column.md @@ -29,8 +29,8 @@ ColumnType ColumnOption ::= 'NOT'? 'NULL' | 'AUTO_INCREMENT' - | 'PRIMARY'? 'KEY' ( 'CLUSTERED' | 'NONCLUSTERED' )? - | 'UNIQUE' 'KEY'? + | 'PRIMARY'? 'KEY' ( 'CLUSTERED' | 'NONCLUSTERED' )? ( 'GLOBAL' | 'LOCAL' )? + | 'UNIQUE' 'KEY'? ( 'GLOBAL' | 'LOCAL' )? | 'DEFAULT' ( NowSymOptionFraction | SignedLiteral | NextValueForSequence ) | 'SERIAL' 'DEFAULT' 'VALUE' | 'ON' 'UPDATE' NowSymOptionFraction @@ -89,6 +89,7 @@ mysql> SELECT * FROM t1; * Adding a new column and setting it to the `PRIMARY KEY` is not supported. * Adding a new column and setting it to `AUTO_INCREMENT` is not supported. * There are limitations on adding generated columns, refer to: [generated column limitations](/generated-columns.md#limitations). +* Setting a [global index](/partitioned-table.md#global-indexes) by specifying `PRIMARY KEY` or `UNIQUE INDEX` as `GLOBAL` when you add a new column is a TiDB extension for [partitioned tables](/partitioned-table.md) and is not compatible with MySQL. ## See also diff --git a/sql-statements/sql-statement-add-index.md b/sql-statements/sql-statement-add-index.md index 0b21517e42be7..4715c6f721de3 100644 --- a/sql-statements/sql-statement-add-index.md +++ b/sql-statements/sql-statement-add-index.md @@ -43,6 +43,8 @@ IndexOption | 'COMMENT' stringLit | 'VISIBLE' | 'INVISIBLE' + | 'GLOBAL' + | 'LOCAL' IndexType ::= 'BTREE' @@ -90,6 +92,7 @@ mysql> EXPLAIN SELECT * FROM t1 WHERE c1 = 3; * TiDB supports parsing the `FULLTEXT` syntax but does not support using the `FULLTEXT` indexes. * Descending indexes are not supported (similar to MySQL 5.7). * Adding the primary key of the `CLUSTERED` type to a table is not supported. For more details about the primary key of the `CLUSTERED` type, refer to [clustered index](/clustered-indexes.md). +* Setting a `PRIMARY KEY` or `UNIQUE INDEX` as a [global index](/partitioned-table.md#global-indexes) with the `GLOBAL` index option is a TiDB extension for [partitioned tables](/partitioned-table.md) and is not compatible with MySQL. ## See also diff --git a/sql-statements/sql-statement-create-index.md b/sql-statements/sql-statement-create-index.md index 2935458f8755c..bf4217d945ecd 100644 --- a/sql-statements/sql-statement-create-index.md +++ b/sql-statements/sql-statement-create-index.md @@ -44,6 +44,7 @@ IndexOption ::= | 'WITH' 'PARSER' Identifier | 'COMMENT' stringLit | ("VISIBLE" | "INVISIBLE") +| ("GLOBAL" | "LOCAL") IndexTypeName ::= 'BTREE' @@ -382,6 +383,7 @@ The system variables associated with the `CREATE INDEX` statement are `tidb_ddl_ * Expression indexes are incompatible with views. 
When a query is executed using a view, the expression index cannot be used at the same time. * Expression indexes have compatibility issues with bindings. When the expression of an expression index has a constant, the binding created for the corresponding query expands its scope. For example, suppose that the expression in the expression index is `a+1`, and the corresponding query condition is `a+1 > 2`. In this case, the created binding is `a+? > ?`, which means that the query with the condition such as `a+2 > 2` is also forced to use the expression index and results in a poor execution plan. In addition, this also affects the baseline capturing and baseline evolution in SQL Plan Management (SPM). * The data written with multi-valued indexes must exactly match the defined data type. Otherwise, data writes fail. For details, see [create multi-valued indexes](/sql-statements/sql-statement-create-index.md#create-multi-valued-indexes). +* Setting a `UNIQUE KEY` as a [global index](/partitioned-table.md#global-indexes) with the `GLOBAL` index option is a TiDB extension for [partitioned tables](/partitioned-table.md) and is not compatible with MySQL. ## See also diff --git a/sql-statements/sql-statement-create-table.md b/sql-statements/sql-statement-create-table.md index 78be305013231..1cf408a2cd7b0 100644 --- a/sql-statements/sql-statement-create-table.md +++ b/sql-statements/sql-statement-create-table.md @@ -45,8 +45,8 @@ ColumnOptionList ::= ColumnOption ::= 'NOT'? 'NULL' | 'AUTO_INCREMENT' -| PrimaryOpt 'KEY' -| 'UNIQUE' 'KEY'? +| PrimaryOpt 'KEY' ( 'GLOBAL' | 'LOCAL' )? +| 'UNIQUE' 'KEY'? ( 'GLOBAL' | 'LOCAL' )? | 'DEFAULT' DefaultValueExpr | 'SERIAL' 'DEFAULT' 'VALUE' | 'ON' 'UPDATE' NowSymOptionFraction @@ -77,6 +77,7 @@ IndexOption ::= 'COMMENT' String | ( 'VISIBLE' | 'INVISIBLE' ) | ('USING' | 'TYPE') ('BTREE' | 'RTREE' | 'HASH') +| ( 'GLOBAL' | 'LOCAL' ) ForeignKeyDef ::= ( 'CONSTRAINT' Identifier )? 'FOREIGN' 'KEY' @@ -242,6 +243,7 @@ mysql> DESC t1; * All of the data types except spatial types are supported. * TiDB accepts index types such as `HASH`, `BTREE` and `RTREE` in syntax for compatibility with MySQL, but ignores them. * TiDB supports parsing the `FULLTEXT` syntax but does not support using the `FULLTEXT` indexes. +* Setting a `PRIMARY KEY` or `UNIQUE INDEX` as a [global index](/partitioned-table.md#global-indexes) with the `GLOBAL` index option is a TiDB extension for [partitioned tables](/partitioned-table.md) and is not compatible with MySQL. diff --git a/system-variables.md b/system-variables.md index e24e13a763671..d3a73d1b88d7e 100644 --- a/system-variables.md +++ b/system-variables.md @@ -2147,13 +2147,17 @@ mysql> SELECT job_info FROM mysql.analyze_jobs ORDER BY end_time DESC LIMIT 1; ### tidb_enable_global_index New in v7.6.0 +> **Warning:** +> +> The feature controlled by this variable is experimental. It is not recommended that you use it in the production environment. This feature might be changed or removed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. + - Scope: SESSION | GLOBAL - Persists to cluster: Yes - Applies to hint [SET_VAR](/optimizer-hints.md#set_varvar_namevar_value): No - Type: Boolean - Default value: `OFF` - Possible values: `OFF`, `ON` -- This variable controls whether to support creating `Global indexes` for partitioned tables. `Global index` is currently in the development stage. **It is not recommended to modify the value of this system variable**. 
+- This variable controls whether to support creating [global indexes](/partitioned-table.md#global-indexes) for partitioned tables. When this variable is enabled, TiDB allows you to create unique indexes that **do not include all the columns used in the partition expressions** by specifying `GLOBAL` in the index definition. ### tidb_enable_lazy_cursor_fetch New in v8.3.0 From 7f423962f549882c139ab24f07ba01a76e6015e8 Mon Sep 17 00:00:00 2001 From: xzhangxian1008 Date: Mon, 19 Aug 2024 18:15:12 +0800 Subject: [PATCH 23/44] Add doc for TopN spill (#18350) --- configure-memory-usage.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/configure-memory-usage.md b/configure-memory-usage.md index b338efe5c1045..25dfbd93bc0f4 100644 --- a/configure-memory-usage.md +++ b/configure-memory-usage.md @@ -149,8 +149,9 @@ TiDB supports disk spill for execution operators. When the memory usage of a SQL - The disk spill behavior is jointly controlled by the following parameters: [`tidb_mem_quota_query`](/system-variables.md#tidb_mem_quota_query), [`tidb_enable_tmp_storage_on_oom`](/system-variables.md#tidb_enable_tmp_storage_on_oom), [`tmp-storage-path`](/tidb-configuration-file.md#tmp-storage-path), and [`tmp-storage-quota`](/tidb-configuration-file.md#tmp-storage-quota). - When the disk spill is triggered, TiDB outputs a log containing the keywords `memory exceeds quota, spill to disk now` or `memory exceeds quota, set aggregate mode to spill-mode`. -- Disk spill for the Sort, MergeJoin, and HashJoin operators is introduced in v4.0.0; disk spill for the non-parallel algorithm of the HashAgg operator is introduced in v5.2.0; disk spill for the parallel algorithm of the HashAgg operator is introduced in v8.0.0 as an experimental feature and becomes generally available (GA) in v8.2.0. You can control whether to enable the parallel HashAgg algorithm that supports disk spill using the [`tidb_enable_parallel_hashagg_spill`](/system-variables.md#tidb_enable_parallel_hashagg_spill-new-in-v800) system variable. This variable will be deprecated in a future release. -- When the SQL executions containing Sort, MergeJoin, HashJoin, or HashAgg cause OOM, TiDB triggers disk spill by default. +- Disk spill for the Sort, MergeJoin, and HashJoin operators is introduced in v4.0.0. Disk spill for the non-parallel algorithm of the HashAgg operator is introduced in v5.2.0. Disk spill for the parallel algorithm of the HashAgg operator is introduced in v8.0.0 as an experimental feature and becomes generally available (GA) in v8.2.0. Disk spill for the TopN operator is introduced in v8.3.0. +- You can control whether to enable the parallel HashAgg algorithm that supports disk spill using the [`tidb_enable_parallel_hashagg_spill`](/system-variables.md#tidb_enable_parallel_hashagg_spill-new-in-v800) system variable. This variable will be deprecated in a future release. +- When the SQL executions containing Sort, MergeJoin, HashJoin, HashAgg, or TopN cause OOM, TiDB triggers disk spill by default. > **Note:** > From ee5f9d5eed3029d3e477e028be85c07437f360bd Mon Sep 17 00:00:00 2001 From: Arenatlx Date: Mon, 19 Aug 2024 18:21:13 +0800 Subject: [PATCH 24/44] add the root type expand impl for rollup syntax. 
(#18552) --- explain-aggregation.md | 2 +- functions-and-operators/group-by-modifier.md | 39 ++++++++++++++++---- 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/explain-aggregation.md b/explain-aggregation.md index a0985cb748137..1d4b863a1b649 100644 --- a/explain-aggregation.md +++ b/explain-aggregation.md @@ -179,7 +179,7 @@ In the `GROUP BY` clause, you can specify one or more columns as a group list an > **Note:** > -> Currently, TiDB does not support the Cube syntax, and TiDB supports generating valid execution plans for the `WITH ROLLUP` syntax only in TiFlash MPP mode. +> Currently, TiDB does not support the Cube syntax. ```sql explain SELECT year, month, grouping(year), grouping(month), SUM(profit) AS profit FROM bank GROUP BY year, month WITH ROLLUP; diff --git a/functions-and-operators/group-by-modifier.md b/functions-and-operators/group-by-modifier.md index 710758e14387a..bdc6a83d7c290 100644 --- a/functions-and-operators/group-by-modifier.md +++ b/functions-and-operators/group-by-modifier.md @@ -36,14 +36,22 @@ Aggregating and summarizing data from multiple columns is commonly used in OLAP ## Prerequisites -Currently, TiDB supports generating valid execution plans for the `WITH ROLLUP` syntax only in TiFlash MPP mode. Therefore, make sure that your TiDB cluster has been deployed with TiFlash nodes and that target fact tables are configured with TiFlash replicas properly. - -For more information, see [Scale out a TiFlash cluster](/scale-tidb-using-tiup.md#scale-out-a-tiflash-cluster). +Before v8.3.0, TiDB only supports generating valid execution plans for the `WITH ROLLUP` syntax in [TiFlash MPP mode](/tiflash/use-tiflash-mpp-mode.md). Therefore, your TiDB cluster needs to contain TiFlash nodes, and the target table must be configured with the correct TiFlash replica. For more information, see [Scale out a TiFlash cluster](/scale-tidb-using-tiup.md#scale-out-a-tiflash-cluster). + + + + + +Before v8.3.0, TiDB only supports generating valid execution plans for the `WITH ROLLUP` syntax in [TiFlash MPP mode](/tiflash/use-tiflash-mpp-mode.md). Therefore, your TiDB cluster needs to contain TiFlash nodes, and the target table must be configured with the correct TiFlash replica. For more information, see [Change node number](/tidb-cloud/scale-tidb-cluster.md#change-node-number). +Starting from v8.3.0, the preceding limitation is removed. Regardless of whether your TiDB cluster contains TiFlash nodes, TiDB supports generating valid execution plans for the `WITH ROLLUP` syntax. + +To identify whether TiDB or TiFlash executes the `Expand` operator, you can check the `task` attribute of the `Expand` operator in the execution plan. For more information, see [How to interpret the ROLLUP execution plan](#how-to-interpret-the-rollup-execution-plan). + ## Examples Suppose you have a profit table named `bank` with the `year`, `month`, `day`, and `profit` columns. @@ -57,7 +65,7 @@ CREATE TABLE bank profit DECIMAL(13, 7) ); -ALTER TABLE bank SET TIFLASH REPLICA 1; -- Add a TiFlash replica for the table +ALTER TABLE bank SET TIFLASH REPLICA 1; -- Add a TiFlash replica for the table in TiFlash MPP mode. INSERT INTO bank VALUES(2000, "Jan", 1, 10.3),(2001, "Feb", 2, 22.4),(2000,"Mar", 3, 31.6) ``` @@ -162,14 +170,31 @@ SELECT year, month, SUM(profit) AS profit, grouping(year) as grp_year, grouping( ## How to interpret the ROLLUP execution plan -To meet the requirements of multidimensional grouping, multidimensional data aggregation uses the `Expand` operator to replicate data. 
Each replica corresponds to a group at a specific dimension. With the data shuffling capability of MPP, the `Expand` operator can rapidly reorganize and calculate a large volume of data between multiple TiFlash nodes, fully utilizing the computational power of each node. +Multidimensional data aggregation uses the `Expand` operator to copy data to meet the needs of multidimensional grouping. Each data copy corresponds to a grouping of a specific dimension. In MPP mode, the `Expand` operator can facilitate data shuffle to quickly reorganize and calculate a large amount of data between multiple nodes, making full use of the computing capacity of each node. In a TiDB cluster without TiFlash nodes, because the `Expand` operator is only executed on a single TiDB node, data redundancy will increase as the number of dimension groupings (`grouping set`) increases. The implementation of the `Expand` operator is similar to that of the `Projection` operator. The difference is that `Expand` is a multi-level `Projection`, which contains multiple levels of projection operation expressions. For each row of the raw data, the `Projection` operator generates only one row in results, whereas the `Expand` operator generates multiple rows in results (the number of rows is equal to the number of levels in projection operation expressions). -The following is an example of an execution plan: +The following example shows the execution plan for a TiDB cluster without TiFlash nodes, where the `task` of the `Expand` operator is `root`, indicating that the `Expand` operator is executed in TiDB: + +```sql +EXPLAIN SELECT year, month, grouping(year), grouping(month), SUM(profit) AS profit FROM bank GROUP BY year, month WITH ROLLUP; ++--------------------------------+---------+-----------+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| id | estRows | task | access object | operator info | ++--------------------------------+---------+-----------+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Projection_7 | 2.40 | root | | Column#6->Column#12, Column#7->Column#13, grouping(gid)->Column#14, grouping(gid)->Column#15, Column#9->Column#16 | +| └─HashAgg_8 | 2.40 | root | | group by:Column#6, Column#7, gid, funcs:sum(test.bank.profit)->Column#9, funcs:firstrow(Column#6)->Column#6, funcs:firstrow(Column#7)->Column#7, funcs:firstrow(gid)->gid | +| └─Expand_12 | 3.00 | root | | level-projection:[test.bank.profit, ->Column#6, ->Column#7, 0->gid],[test.bank.profit, Column#6, ->Column#7, 1->gid],[test.bank.profit, Column#6, Column#7, 3->gid]; schema: [test.bank.profit,Column#6,Column#7,gid] | +| └─Projection_14 | 3.00 | root | | test.bank.profit, test.bank.year->Column#6, test.bank.month->Column#7 | +| └─TableReader_16 | 3.00 | root | | data:TableFullScan_15 | +| └─TableFullScan_15 | 3.00 | cop[tikv] | table:bank | keep order:false, stats:pseudo | 
++--------------------------------+---------+-----------+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +6 rows in set (0.00 sec) +``` + +The following example shows the execution plan in TiFlash MPP mode, where the `task` of the `Expand` operator is `mpp[tiflash]`, indicating that the `Expand` operator is executed in TiFlash: ```sql -explain SELECT year, month, grouping(year), grouping(month), SUM(profit) AS profit FROM bank GROUP BY year, month WITH ROLLUP; +EXPLAIN SELECT year, month, grouping(year), grouping(month), SUM(profit) AS profit FROM bank GROUP BY year, month WITH ROLLUP; +----------------------------------------+---------+--------------+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | id | estRows | task | access object | operator info | +----------------------------------------+---------+--------------+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ From 2ec1371407a20ecbf404bb9bf8aa3ff313fdc06b Mon Sep 17 00:00:00 2001 From: guo-shaoge Date: Mon, 19 Aug 2024 18:25:42 +0800 Subject: [PATCH 25/44] system-variables: add tiflash_hashagg_preaggregation_mode (#18441) --- system-variables.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/system-variables.md b/system-variables.md index d3a73d1b88d7e..668f52d54db27 100644 --- a/system-variables.md +++ b/system-variables.md @@ -6158,6 +6158,19 @@ For details, see [Identify Slow Queries](/identify-slow-queries.md). +### tiflash_hashagg_preaggregation_mode New in v8.3.0 + +- Scope: SESSION | GLOBAL +- Persists to cluster: Yes +- Applies to hint [SET_VAR](/optimizer-hints.md#set_varvar_namevar_value): Yes +- Type: Enumeration +- Default value: `force_preagg` +- Value options: `force_preagg`, `force_streaming`, `auto` +- This variable controls the pre-aggregation strategy used during the first stage of two-stage or three-stage HashAgg operations pushed down to TiFlash: + - `force_preagg`: TiFlash forces pre-aggregation during the first stage of HashAgg. This behavior is consistent with the behavior before v8.3.0. + - `force_streaming`: TiFlash directly sends data to the next stage of HashAgg without pre-aggregation. + - `auto`: TiFlash automatically chooses whether to perform pre-aggregation based on the current workload's aggregation degree. 
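+
+For example, based on the scope and value options listed above, you can switch the strategy at the session level or cluster-wide (a usage sketch, not a tuning recommendation):
+
+```sql
+-- Let TiFlash decide based on the observed aggregation degree for this session.
+SET SESSION tiflash_hashagg_preaggregation_mode = 'auto';
+
+-- Apply a fixed strategy cluster-wide.
+SET GLOBAL tiflash_hashagg_preaggregation_mode = 'force_streaming';
+```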
+ ### tikv_client_read_timeout New in v7.4.0 - Scope: SESSION | GLOBAL From eb946a8e557807721d348ef4732ad0f1c326624b Mon Sep 17 00:00:00 2001 From: Aolin Date: Mon, 19 Aug 2024 18:31:41 +0800 Subject: [PATCH 26/44] tiproxy: add vip configurations (#18498) --- config-templates/simple-tiproxy.yaml | 7 +++++++ tiproxy/tiproxy-configuration.md | 27 ++++++++++++++++++++++++-- tiproxy/tiproxy-deployment-topology.md | 2 +- tiproxy/tiproxy-grafana.md | 3 +++ tiproxy/tiproxy-load-balance.md | 11 ++++------- tiproxy/tiproxy-overview.md | 8 +++++--- 6 files changed, 45 insertions(+), 13 deletions(-) diff --git a/config-templates/simple-tiproxy.yaml b/config-templates/simple-tiproxy.yaml index 2fd24cd01d3fa..5e5bb5b28f0ea 100644 --- a/config-templates/simple-tiproxy.yaml +++ b/config-templates/simple-tiproxy.yaml @@ -5,6 +5,12 @@ global: ssh_port: 22 deploy_dir: "/tidb-deploy" data_dir: "/tidb-data" +component_versions: + tiproxy: "v1.2.0" +server_configs: + tiproxy: + ha.virtual-ip: "10.0.1.10/24" + ha.interface: "eth0" pd_servers: - host: 10.0.1.1 @@ -23,6 +29,7 @@ tikv_servers: tiproxy_servers: - host: 10.0.1.11 + - host: 10.0.1.12 monitoring_servers: - host: 10.0.1.13 diff --git a/tiproxy/tiproxy-configuration.md b/tiproxy/tiproxy-configuration.md index 15497b45d0c97..7a4b1ce2343a5 100644 --- a/tiproxy/tiproxy-configuration.md +++ b/tiproxy/tiproxy-configuration.md @@ -15,8 +15,9 @@ max-connections = 100 [api] addr = "0.0.0.0:3080" -[log] -level = "info" +[ha] +virtual-ip = "10.0.1.10/24" +interface = "eth0" [security] [security.cluster-tls] @@ -118,6 +119,28 @@ Configurations for the load balancing policy of TiProxy. + Possible values: `resource`, `location`, `connection` + Specifies the load balancing policy. For the meaning of each possible value, see [TiProxy load balancing policies](/tiproxy/tiproxy-load-balance.md#configure-load-balancing-policies). +### ha + +High availability configurations for TiProxy. + +#### `virtual-ip` + ++ Default value: `""` ++ Support hot-reload: no ++ Specifies the virtual IP address in the CIDR format, such as `"10.0.1.10/24"`. In a cluster with multiple TiProxy instances, only one instance binds to the virtual IP. If this instance goes offline, another TiProxy instance will automatically bind to the IP, ensuring clients can always connect to an available TiProxy through the virtual IP. + +> **Note:** +> +> - Virtual IP is only supported on Linux operating systems. +> - The Linux user running TiProxy must have permission to bind IP addresses. +> - The virtual IP and the IPs of all TiProxy instances must be within the same CIDR range. + +#### `interface` + ++ Default value: `""` ++ Support hot-reload: no ++ Specifies the network interface to bind the virtual IP to, such as `"eth0"`. The virtual IP will be bound to a TiProxy instance only when both [`ha.virtual-ip`](#virtual-ip) and `ha.interface` are set. + ### `labels` + Default value: `{}` diff --git a/tiproxy/tiproxy-deployment-topology.md b/tiproxy/tiproxy-deployment-topology.md index 22178eb203480..ccee9f4f5d7f6 100644 --- a/tiproxy/tiproxy-deployment-topology.md +++ b/tiproxy/tiproxy-deployment-topology.md @@ -16,7 +16,7 @@ TiProxy is a L7 proxy server for TiDB, which can balance connections and migrate | TiDB | 3 | 16 VCore 32GB * 3 | 10.0.1.4
10.0.1.5
10.0.1.6 | Default port
Global directory configuration | | PD | 3 | 4 VCore 8GB * 3 | 10.0.1.1
10.0.1.2
10.0.1.3 | Default port
Global directory configuration | | TiKV | 3 | 16 VCore 32GB 2TB (nvme ssd) * 3 | 10.0.1.7
10.0.1.8
10.0.1.9 | Default port
Global directory configuration | -| TiProxy | 1 | 4 VCore 8 GB * 1 | 10.0.1.11 | Default port
Global directory configuration |
+| TiProxy | 2 | 4 VCore 8 GB * 2 | 10.0.1.11
10.0.1.12 | Default port
Global directory configuration | | Monitoring & Grafana | 1 | 4 VCore 8GB * 1 500GB (ssd) | 10.0.1.13 | Default port
Global directory configuration | ### Topology templates diff --git a/tiproxy/tiproxy-grafana.md b/tiproxy/tiproxy-grafana.md index bc41b1e8265a2..b592b098910ce 100644 --- a/tiproxy/tiproxy-grafana.md +++ b/tiproxy/tiproxy-grafana.md @@ -38,6 +38,9 @@ TiProxy has four panel groups. The metrics on these panels indicate the current - backend network break: fails to read from or write to the TiDB. This may be caused by a network problem or the TiDB server shutting down - backend handshake fail: TiProxy fails to handshake with the TiDB server - Goroutine Count: the number of Goroutines on each TiProxy instance +- Owner: the TiProxy instance that executes various tasks. For example, `10.24.31.1:3080 - vip` indicates that the TiProxy instance at `10.24.31.1:3080` is bound to a virtual IP. The tasks include the following: + - vip: binds a virtual IP + - metric_reader: reads monitoring data from TiDB servers ## Query-Summary diff --git a/tiproxy/tiproxy-load-balance.md b/tiproxy/tiproxy-load-balance.md index aa9cf327a30ee..5e8b6395be193 100644 --- a/tiproxy/tiproxy-load-balance.md +++ b/tiproxy/tiproxy-load-balance.md @@ -16,10 +16,7 @@ By default, TiProxy enables all policies with the following priorities: 5. Location-based load balancing: TiProxy prioritizes routing requests to the TiDB server geographically closest to TiProxy. 6. Connection count-based load balancing: when the connection count of a TiDB server is much higher than that of other TiDB servers, TiProxy migrates connections from that TiDB server to a TiDB server with fewer connections. -> **Note:** -> -> - Health-based, memory-based, and CPU-based load balancing policies depend on [Prometheus](https://prometheus.io). Ensure that Prometheus is available. Otherwise, these policies do not take effect. -> - To adjust the priorities of load balancing policies, see [Configure load balancing policies](#configure-load-balancing-policies). +To adjust the priorities of load balancing policies, see [Configure load balancing policies](#configure-load-balancing-policies). ## Status-based load balancing @@ -27,7 +24,7 @@ TiProxy periodically checks whether a TiDB server is offline or shutting down us ## Health-based load balancing -TiProxy determines the health of a TiDB server by querying its error count from Prometheus. When the health of a TiDB server is abnormal while others are normal, TiProxy migrates connections from that server to a healthy TiDB server, achieving automatic failover. +TiProxy determines the health of a TiDB server by querying its error count. When the health of a TiDB server is abnormal while others are normal, TiProxy migrates connections from that server to a healthy TiDB server, achieving automatic failover. This policy is suitable for the following scenarios: @@ -36,7 +33,7 @@ This policy is suitable for the following scenarios: ## Memory-based load balancing -TiProxy queries the memory usage of TiDB servers from Prometheus. When the memory usage of a TiDB server is rapidly increasing or reaching a high level, TiProxy migrates connections from that server to a TiDB server with lower memory usage, preventing unnecessary connection termination due to OOM. TiProxy does not guarantee identical memory usage across TiDB servers. This policy only takes effect when a TiDB server is at risk of OOM. +TiProxy queries the memory usage of TiDB servers. 
When the memory usage of a TiDB server is rapidly increasing or reaching a high level, TiProxy migrates connections from that server to a TiDB server with lower memory usage, preventing unnecessary connection termination due to OOM. TiProxy does not guarantee identical memory usage across TiDB servers. This policy only takes effect when a TiDB server is at risk of OOM. When a TiDB server is at risk of OOM, TiProxy attempts to migrate all connections from it. Usually, if OOM is caused by runaway queries, ongoing runaway queries will not be migrated to another TiDB server for re-execution, because these connections can only be migrated after the transaction is complete. @@ -48,7 +45,7 @@ This policy has the following limitations: ## CPU-based load balancing -TiProxy queries the CPU usage of TiDB servers from Prometheus and migrates connections from a TiDB server with high CPU usage to a server with lower usage, reducing overall query latency. TiProxy does not guarantee identical CPU usage across TiDB servers but ensures that the CPU usage differences are minimized. +TiProxy queries the CPU usage of TiDB servers and migrates connections from a TiDB server with high CPU usage to a server with lower usage, reducing overall query latency. TiProxy does not guarantee identical CPU usage across TiDB servers but ensures that the CPU usage differences are minimized. This policy is suitable for the following scenarios: diff --git a/tiproxy/tiproxy-overview.md b/tiproxy/tiproxy-overview.md index 1ca43e8ea514f..5ae207e7b77d4 100644 --- a/tiproxy/tiproxy-overview.md +++ b/tiproxy/tiproxy-overview.md @@ -40,7 +40,7 @@ When a TiDB server performs scaling in or scaling out, if you use a common load ### Quick deployment -TiProxy is integrated into [TiUP](https://github.com/pingcap/tiup), [TiDB Operator](https://github.com/pingcap/tidb-operator), [TiDB Dashboard](/dashboard/dashboard-intro.md), and [Grafana](/tiproxy/tiproxy-grafana.md), which reduces the deployment, operation, and management costs. +TiProxy is integrated into [TiUP](https://github.com/pingcap/tiup), [TiDB Operator](https://github.com/pingcap/tidb-operator), [TiDB Dashboard](/dashboard/dashboard-intro.md), and [Grafana](/tiproxy/tiproxy-grafana.md), and supports built-in virtual IP management, reducing the deployment, operation, and management costs. ## User scenarios @@ -91,7 +91,7 @@ This section describes how to deploy and change TiProxy using TiUP. For how to d 3. Configure the TiProxy instances. - To ensure the high availability of TiProxy, it is recommended to deploy at least two TiProxy instances. You can use hardware load balancers to distribute traffic to each TiProxy instance, or configure virtual IP to route the traffic to the available TiProxy instance. + To ensure the high availability of TiProxy, it is recommended to deploy at least two TiProxy instances and configure a virtual IP by setting [`ha.virtual-ip`](/tiproxy/tiproxy-configuration.md#virtual-ip) and [`ha.interface`](/tiproxy/tiproxy-configuration.md#interface) to route the traffic to the available TiProxy instance. When selecting the model and number of TiProxy instances, consider the following factors: @@ -106,12 +106,14 @@ This section describes how to deploy and change TiProxy using TiUP. 
For how to d ```yaml component_versions: - tiproxy: "v1.0.0" + tiproxy: "v1.2.0" server_configs: tiproxy: security.server-tls.ca: "/var/ssl/ca.pem" security.server-tls.cert: "/var/ssl/cert.pem" security.server-tls.key: "/var/ssl/key.pem" + ha.virtual-ip: "10.0.1.10/24" + ha.interface: "eth0" ``` 4. Start the cluster. From 19d590015ed4eeec459bf3e7d59ed024d1e95c8d Mon Sep 17 00:00:00 2001 From: Will DeVries <1624341+wddevries@users.noreply.github.com> Date: Mon, 19 Aug 2024 08:55:42 -0700 Subject: [PATCH 27/44] Remove (R) from STATS_EXTENDED in keywords list. (#18037) --- keywords.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keywords.md b/keywords.md index 97cc1bfb75c79..54cf58c2e61a7 100644 --- a/keywords.md +++ b/keywords.md @@ -647,7 +647,7 @@ The following list shows the keywords in TiDB. Reserved keywords are marked with - STATS_BUCKETS - STATS_COL_CHOICE - STATS_COL_LIST -- STATS_EXTENDED (R) +- STATS_EXTENDED - STATS_HEALTHY - STATS_HISTOGRAMS - STATS_LOCKED From f098eea2e6b61ee54dd27342ec334e8bf2c2cea4 Mon Sep 17 00:00:00 2001 From: Jianjun Liao <36503113+Leavrth@users.noreply.github.com> Date: Tue, 20 Aug 2024 09:37:41 +0800 Subject: [PATCH 28/44] br: add issue for pitr usage (#18468) --- br/br-pitr-manual.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/br/br-pitr-manual.md b/br/br-pitr-manual.md index 5a8e128d5d627..4ee61ec75bc00 100644 --- a/br/br-pitr-manual.md +++ b/br/br-pitr-manual.md @@ -396,6 +396,6 @@ Restore KV Files <-------------------------------------------------------------- > **Note:** > -> - When you restore the cluster for the first time, you must specify the full snapshot data. Otherwise, some data in the newly created table might be incorrect due to rewriting Table ID rules. +> - When you restore the cluster for the first time, you must specify the full snapshot data. Otherwise, some data in the newly created table might be incorrect due to rewriting Table ID rules. For more information, see GitHub issue [#54418](https://github.com/pingcap/tidb/issues/54418). > - You cannot restore the log backup data of a certain time period repeatedly. If you restore the log backup data of a range `[t1=10, t2=20)` repeatedly, the restored data might be inconsistent. > - When you restore log data of different time periods in multiple batches, ensure that the log data is restored in consecutive order. If you restore the log backup data of `[t1, t2)`, `[t2, t3)`, and `[t3, t4)` in consecutive order, the restored data is consistent. However, if you restore `[t1, t2)` and then skip `[t2, t3)` to restore `[t3, t4)`, the restored data might be inconsistent. 
From bb94d49f4c1cd00d9292a1130deb46c209e6e4c7 Mon Sep 17 00:00:00 2001 From: Lilian Lee Date: Tue, 20 Aug 2024 10:01:42 +0800 Subject: [PATCH 29/44] Deprecate TiDB Binlog (#18569) --- TOC.md | 2 +- basic-features.md | 2 +- ecosystem-tool-user-guide.md | 4 ++++ faq/deploy-and-maintain-faq.md | 2 +- production-deployment-using-tiup.md | 2 +- .../sql-statement-flashback-database.md | 2 +- sql-statements/sql-statement-flashback-table.md | 2 +- sql-statements/sql-statement-recover-table.md | 2 +- system-variables.md | 6 +++--- ticdc-deployment-topology.md | 2 +- tidb-binlog-deployment-topology.md | 6 ++++-- ...rectional-replication-between-tidb-clusters.md | 3 +-- tidb-binlog/binlog-control.md | 2 +- tidb-binlog/deploy-tidb-binlog.md | 4 ++++ tidb-binlog/get-started-with-tidb-binlog.md | 2 +- tidb-binlog/tidb-binlog-overview.md | 15 ++++++--------- tidb-binlog/upgrade-tidb-binlog.md | 5 ++--- tidb-troubleshooting-map.md | 2 +- transaction-overview.md | 2 +- upgrade-tidb-using-tiup.md | 2 +- 20 files changed, 37 insertions(+), 32 deletions(-) diff --git a/TOC.md b/TOC.md index 494098945fadd..4b281c3706fa3 100644 --- a/TOC.md +++ b/TOC.md @@ -604,7 +604,7 @@ - [Troubleshoot](/ticdc/troubleshoot-ticdc.md) - [FAQs](/ticdc/ticdc-faq.md) - [Glossary](/ticdc/ticdc-glossary.md) - - TiDB Binlog + - TiDB Binlog (Deprecated) - [Overview](/tidb-binlog/tidb-binlog-overview.md) - [Quick Start](/tidb-binlog/get-started-with-tidb-binlog.md) - [Deploy](/tidb-binlog/deploy-tidb-binlog.md) diff --git a/basic-features.md b/basic-features.md index b2ffd05da3c5e..8793addb4c925 100644 --- a/basic-features.md +++ b/basic-features.md @@ -272,4 +272,4 @@ You can try out TiDB features on [TiDB Playground](https://play.tidbcloud.com/?u [^5]: Starting from [TiDB v7.0.0](/releases/release-7.0.0.md), the new parameter `FIELDS DEFINED NULL BY` and support for importing data from S3 and GCS are experimental features. Starting from [v7.6.0](/releases/release-7.6.0.md), TiDB processes `LOAD DATA` in transactions in the same way as MySQL. The `LOAD DATA` statement in a transaction no longer automatically commits the current transaction or starts a new transaction. Moreover, you can explicitly commit or roll back the `LOAD DATA` statement in a transaction. Additionally, the `LOAD DATA` statement is affected by the TiDB transaction mode setting (optimistic or pessimistic transaction). -[^6]: Starting from TiDB v7.5.0, technical support for the data replication feature of [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) is no longer provided. It is strongly recommended to use [TiCDC](/ticdc/ticdc-overview.md) as an alternative solution for data replication. Although TiDB Binlog v7.5.0 still supports the Point-in-Time Recovery (PITR) scenario, this component will be completely deprecated in future versions. It is recommended to use [PITR](/br/br-pitr-guide.md) as an alternative solution for data recovery. +[^6]: Starting from v7.5.0, [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) replication is deprecated. Starting from v8.3.0, TiDB Binlog is fully deprecated, with removal planned for a future release. For incremental data replication, use [TiCDC](/ticdc/ticdc-overview.md) instead. For point-in-time recovery (PITR), use [PITR](/br/br-pitr-guide.md). 
diff --git a/ecosystem-tool-user-guide.md b/ecosystem-tool-user-guide.md index 60b1f17d81512..49e2d3312fdcd 100644 --- a/ecosystem-tool-user-guide.md +++ b/ecosystem-tool-user-guide.md @@ -127,6 +127,10 @@ The following are the basics of TiCDC: [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) is a tool that collects binlog for TiDB clusters and provides nearly real-time data replication and backup. You can use it for incremental data replication between TiDB clusters, such as making a TiDB cluster the secondary cluster of the primary TiDB cluster. +> **Warning:** +> +> Starting from v7.5.0, [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) replication is deprecated. Starting from v8.3.0, TiDB Binlog is fully deprecated, with removal planned for a future release. For incremental data replication, use [TiCDC](/ticdc/ticdc-overview.md) instead. For point-in-time recovery (PITR), use [PITR](/br/br-pitr-guide.md). + The following are the basics of TiDB Binlog: - Source: TiDB clusters diff --git a/faq/deploy-and-maintain-faq.md b/faq/deploy-and-maintain-faq.md index cf9577c84ec28..87ece55c0dfaf 100644 --- a/faq/deploy-and-maintain-faq.md +++ b/faq/deploy-and-maintain-faq.md @@ -27,7 +27,7 @@ If the resources are adequate, it is recommended to use RAID 10 for SSD. If the ### What's the recommended configuration of TiDB components? -- TiDB has a high requirement on CPU and memory. If you need to enable TiDB Binlog, the local disk space should be increased based on the service volume estimation and the time requirement for the GC operation. But the SSD disk is not a must. +- TiDB has a high requirement on CPU and memory. If you need to enable TiDB Binlog (deprecated), the local disk space should be increased based on the service volume estimation and the time requirement for the GC operation. But the SSD disk is not a must. - PD stores the cluster metadata and has frequent Read and Write requests. It demands a high I/O disk. A disk of low performance will affect the performance of the whole cluster. It is recommended to use SSD disks. In addition, a larger number of Regions has a higher requirement on CPU and memory. - TiKV has a high requirement on CPU, memory and disk. It is required to use SSD. diff --git a/production-deployment-using-tiup.md b/production-deployment-using-tiup.md index d774736c918a1..d7f81b361cfd7 100644 --- a/production-deployment-using-tiup.md +++ b/production-deployment-using-tiup.md @@ -8,7 +8,7 @@ aliases: ['/docs/dev/production-deployment-using-tiup/','/docs/dev/how-to/deploy [TiUP](https://github.com/pingcap/tiup) is a cluster operation and maintenance tool introduced in TiDB 4.0. TiUP provides [TiUP cluster](https://github.com/pingcap/tiup/tree/master/components/cluster), a cluster management component written in Golang. By using TiUP cluster, you can easily perform daily database operations, including deploying, starting, stopping, destroying, scaling, and upgrading a TiDB cluster, and manage TiDB cluster parameters. -TiUP supports deploying TiDB, TiFlash, TiDB Binlog, TiCDC, and the monitoring system. This document introduces how to deploy TiDB clusters of different topologies. +TiUP supports deploying TiDB, TiFlash, [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) (deprecated), TiCDC, and the monitoring system. This document introduces how to deploy TiDB clusters of different topologies. ## Step 1. 
Prerequisites and precheck diff --git a/sql-statements/sql-statement-flashback-database.md b/sql-statements/sql-statement-flashback-database.md index 485a97dfcabcb..5534d31294a8f 100644 --- a/sql-statements/sql-statement-flashback-database.md +++ b/sql-statements/sql-statement-flashback-database.md @@ -36,7 +36,7 @@ FlashbackToNewName ::= * You cannot restore the same database multiple times using the `FLASHBACK DATABASE` statement. Because the database restored by `FLASHBACK DATABASE` has the same schema ID as the original database, restoring the same database multiple times leads to duplicate schema IDs. In TiDB, the database schema ID must be globally unique. -* When TiDB Binlog is enabled, note the following when you use `FLASHBACK DATABASE`: +* When TiDB Binlog (deprecated) is enabled, note the following when you use `FLASHBACK DATABASE`: * The downstream secondary database must support `FLASHBACK DATABASE`. * The GC life time of the secondary database must be longer than that of the primary database. Otherwise, the latency between the upstream and the downstream might lead to data restoration failure in the downstream. diff --git a/sql-statements/sql-statement-flashback-table.md b/sql-statements/sql-statement-flashback-table.md index 3cdfcb373720e..dfe9f0a82d1e3 100644 --- a/sql-statements/sql-statement-flashback-table.md +++ b/sql-statements/sql-statement-flashback-table.md @@ -43,7 +43,7 @@ FlashbackToNewName ::= If a table is dropped and the GC lifetime has passed, you can no longer use the `FLASHBACK TABLE` statement to recover the dropped data. Otherwise, an error like `Can't find dropped / truncated table 't' in GC safe point 2020-03-16 16:34:52 +0800 CST` will be returned. -Pay attention to the following conditions and requirements when you enable TiDB Binlog and use the `FLASHBACK TABLE` statement: +Pay attention to the following conditions and requirements when you enable TiDB Binlog (deprecated) and use the `FLASHBACK TABLE` statement: * The downstream secondary cluster must also support `FLASHBACK TABLE`. * The GC lifetime of the secondary cluster must be longer than that of the primary cluster. diff --git a/sql-statements/sql-statement-recover-table.md b/sql-statements/sql-statement-recover-table.md index 87f12aa90e753..5bfb62e9259f2 100644 --- a/sql-statements/sql-statement-recover-table.md +++ b/sql-statements/sql-statement-recover-table.md @@ -40,7 +40,7 @@ NUM ::= intLit > > + If a table is deleted and the GC lifetime is out, the table cannot be recovered with `RECOVER TABLE`. Execution of `RECOVER TABLE` in this scenario returns an error like: `snapshot is older than GC safe point 2019-07-10 13:45:57 +0800 CST`. > -> + If the TiDB version is 3.0.0 or later, it is not recommended for you to use `RECOVER TABLE` when TiDB Binlog is used. +> + If the TiDB version is 3.0.0 or later, it is not recommended for you to use `RECOVER TABLE` when TiDB Binlog (deprecated) is used. > > + `RECOVER TABLE` is supported in the Binlog version 3.0.1, so you can use `RECOVER TABLE` in the following three situations: > diff --git a/system-variables.md b/system-variables.md index 668f52d54db27..2e3dfa206ea3f 100644 --- a/system-variables.md +++ b/system-variables.md @@ -650,7 +650,7 @@ This variable is an alias for [`last_insert_id`](#last_insert_id). - Applies to hint [SET_VAR](/optimizer-hints.md#set_varvar_namevar_value): No - Type: Boolean - Default value: `OFF` -- This variable indicates whether [TiDB Binlog](https://docs.pingcap.com/tidb/stable/tidb-binlog-overview) is used. 
+- This variable indicates whether [TiDB Binlog](https://docs.pingcap.com/tidb/stable/tidb-binlog-overview) (deprecated) is used. ### max_allowed_packet New in v6.1.0 @@ -1865,7 +1865,7 @@ mysql> SELECT job_info FROM mysql.analyze_jobs ORDER BY end_time DESC LIMIT 1; > **Note:** > > - The default value of `ON` only applies to new clusters. if your cluster was upgraded from an earlier version of TiDB, the value `OFF` will be used instead. -> - If you have enabled TiDB Binlog, enabling this variable cannot improve the performance. To improve the performance, it is recommended to use [TiCDC](https://docs.pingcap.com/tidb/stable/ticdc-overview) instead. +> - If you have enabled TiDB Binlog (deprecated), enabling this variable cannot improve the performance. To improve the performance, it is recommended to use [TiCDC](https://docs.pingcap.com/tidb/stable/ticdc-overview) instead. > - Enabling this parameter only means that one-phase commit becomes an optional mode of transaction commit. In fact, the most suitable mode of transaction commit is determined by TiDB. ### tidb_enable_analyze_snapshot New in v6.2.0 @@ -1898,7 +1898,7 @@ mysql> SELECT job_info FROM mysql.analyze_jobs ORDER BY end_time DESC LIMIT 1; > **Note:** > > - The default value of `ON` only applies to new clusters. if your cluster was upgraded from an earlier version of TiDB, the value `OFF` will be used instead. -> - If you have enabled TiDB Binlog, enabling this variable cannot improve the performance. To improve the performance, it is recommended to use [TiCDC](https://docs.pingcap.com/tidb/stable/ticdc-overview) instead. +> - If you have enabled TiDB Binlog (deprecated), enabling this variable cannot improve the performance. To improve the performance, it is recommended to use [TiCDC](https://docs.pingcap.com/tidb/stable/ticdc-overview) instead. > - Enabling this parameter only means that Async Commit becomes an optional mode of transaction commit. In fact, the most suitable mode of transaction commit is determined by TiDB. ### tidb_enable_auto_analyze New in v6.1.0 diff --git a/ticdc-deployment-topology.md b/ticdc-deployment-topology.md index 75a724821e15c..a2ef9c8acfe8f 100644 --- a/ticdc-deployment-topology.md +++ b/ticdc-deployment-topology.md @@ -12,7 +12,7 @@ aliases: ['/docs/dev/ticdc-deployment-topology/'] This document describes the deployment topology of [TiCDC](/ticdc/ticdc-overview.md) based on the minimal cluster topology. -TiCDC is a tool for replicating the incremental data of TiDB, introduced in TiDB 4.0. It supports multiple downstream platforms, such as TiDB, MySQL, Kafka, MQ, and storage services. Compared with TiDB Binlog, TiCDC has lower latency and native high availability. +TiCDC is a tool for replicating the incremental data of TiDB, introduced in TiDB 4.0. It supports multiple downstream platforms, such as TiDB, MySQL, Kafka, MQ, and storage services. Compared with TiDB Binlog (deprecated), TiCDC has lower latency and native high availability. ## Topology information diff --git a/tidb-binlog-deployment-topology.md b/tidb-binlog-deployment-topology.md index 3f991426c7b2b..492d8ce62b430 100644 --- a/tidb-binlog-deployment-topology.md +++ b/tidb-binlog-deployment-topology.md @@ -6,9 +6,11 @@ aliases: ['/docs/dev/tidb-binlog-deployment-topology/'] # TiDB Binlog Deployment Topology -This document describes the deployment topology of [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) based on the minimal TiDB topology. 
+This document describes the deployment topology of [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) based on the minimal TiDB topology. TiDB Binlog provides near real-time backup and replication. -TiDB Binlog is the widely used component for replicating incremental data. It provides near real-time backup and replication. +> **Warning:** +> +> Starting from v7.5.0, [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) replication is deprecated. Starting from v8.3.0, TiDB Binlog is fully deprecated, with removal planned for a future release. For incremental data replication, use [TiCDC](/ticdc/ticdc-overview.md) instead. For point-in-time recovery (PITR), use [PITR](/br/br-pitr-guide.md). ## Topology information diff --git a/tidb-binlog/bidirectional-replication-between-tidb-clusters.md b/tidb-binlog/bidirectional-replication-between-tidb-clusters.md index 41988552a6f0f..64e5675b0efb3 100644 --- a/tidb-binlog/bidirectional-replication-between-tidb-clusters.md +++ b/tidb-binlog/bidirectional-replication-between-tidb-clusters.md @@ -8,9 +8,8 @@ aliases: ['/docs/dev/tidb-binlog/bidirectional-replication-between-tidb-clusters > **Warning:** > +> - Starting from v7.5.0, [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) replication is deprecated. Starting from v8.3.0, TiDB Binlog is fully deprecated, with removal planned for a future release. For incremental data replication, use [TiCDC](/ticdc/ticdc-overview.md) instead. For point-in-time recovery (PITR), use [PITR](/br/br-pitr-guide.md). > - TiDB Binlog is not compatible with some features introduced in TiDB v5.0 and they cannot be used together. For details, see [Notes](/tidb-binlog/tidb-binlog-overview.md#notes). -> - Starting from TiDB v7.5.0, technical support for the data replication feature of TiDB Binlog is no longer provided. It is strongly recommended to use [TiCDC](/ticdc/ticdc-overview.md) as an alternative solution for data replication. -> - Although TiDB v7.5.0 still supports the real-time backup and restoration feature of TiDB Binlog, this component will be completely deprecated in future versions. It is recommended to use [PITR](/br/br-pitr-guide.md) as an alternative solution for data recovery. This document describes the bidirectional replication between two TiDB clusters, how the replication works, how to enable it, and how to replicate DDL operations. diff --git a/tidb-binlog/binlog-control.md b/tidb-binlog/binlog-control.md index 94b65503a82ae..c0aa292fbb577 100644 --- a/tidb-binlog/binlog-control.md +++ b/tidb-binlog/binlog-control.md @@ -6,7 +6,7 @@ aliases: ['/docs/dev/tidb-binlog/binlog-control/'] # binlogctl -[Binlog Control](https://github.com/pingcap/tidb-binlog/tree/master/binlogctl) (`binlogctl` for short) is a command line tool for TiDB Binlog. You can use `binlogctl` to manage TiDB Binlog clusters. +[Binlog Control](https://github.com/pingcap/tidb-binlog/tree/master/binlogctl) (`binlogctl` for short) is a command line tool for [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) (deprecated). You can use `binlogctl` to manage TiDB Binlog clusters. You can use `binlogctl` to: diff --git a/tidb-binlog/deploy-tidb-binlog.md b/tidb-binlog/deploy-tidb-binlog.md index 10699145008c0..cfc17ee160f50 100644 --- a/tidb-binlog/deploy-tidb-binlog.md +++ b/tidb-binlog/deploy-tidb-binlog.md @@ -8,6 +8,10 @@ aliases: ['/docs/dev/tidb-binlog/deploy-tidb-binlog/','/docs/dev/reference/tidb- This document describes how to [deploy TiDB Binlog using a Binary package](#deploy-tidb-binlog-using-a-binary-package). 
+> **Warning:** +> +> Starting from TiDB v8.3.0, TiDB Binlog is deprecated, and is planned to be removed in a future release. For incremental data replication, use [TiCDC](/ticdc/ticdc-overview.md) instead. For point-in-time recovery (PITR), use [PITR](/br/br-pitr-guide.md). + ## Hardware requirements Pump and Drainer are deployed and operate on 64-bit universal hardware server platforms with Intel x86-64 architecture. diff --git a/tidb-binlog/get-started-with-tidb-binlog.md b/tidb-binlog/get-started-with-tidb-binlog.md index ed01280325f66..27cc03cae5c45 100644 --- a/tidb-binlog/get-started-with-tidb-binlog.md +++ b/tidb-binlog/get-started-with-tidb-binlog.md @@ -6,7 +6,7 @@ aliases: ['/docs/dev/get-started-with-tidb-binlog/','/docs/dev/how-to/get-starte # TiDB Binlog Tutorial -This tutorial starts with a simple TiDB Binlog deployment with a single node of each component (Placement Driver, TiKV Server, TiDB Server, Pump, and Drainer), set up to push data into a MariaDB Server instance. +This tutorial starts with a simple [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) (deprecated) deployment with a single node of each component (Placement Driver, TiKV Server, TiDB Server, Pump, and Drainer), set up to push data into a MariaDB Server instance. This tutorial is targeted toward users who have some familiarity with the [TiDB Architecture](/tidb-architecture.md), who may have already set up a TiDB cluster (not mandatory), and who wants to gain hands-on experience with TiDB Binlog. This tutorial is a good way to "kick the tires" of TiDB Binlog and to familiarize yourself with the concepts of its architecture. diff --git a/tidb-binlog/tidb-binlog-overview.md b/tidb-binlog/tidb-binlog-overview.md index 6ac124eddceca..f6909668c4ad6 100644 --- a/tidb-binlog/tidb-binlog-overview.md +++ b/tidb-binlog/tidb-binlog-overview.md @@ -6,21 +6,18 @@ aliases: ['/docs/dev/tidb-binlog/tidb-binlog-overview/','/docs/dev/reference/tid # TiDB Binlog Cluster Overview -This document introduces the architecture and the deployment of the cluster version of TiDB Binlog. +TiDB Binlog is a tool used to collect binlog data from TiDB and provide near real-time backup and replication to downstream platforms. This document introduces the architecture and the deployment of the cluster version of TiDB Binlog. -TiDB Binlog is a tool used to collect binlog data from TiDB and provide near real-time backup and replication to downstream platforms. +> **Warning:** +> +> - Starting from v7.5.0, TiDB Binlog replication is deprecated. Starting from v8.3.0, TiDB Binlog is fully deprecated, with removal planned for a future release. For incremental data replication, use [TiCDC](/ticdc/ticdc-overview.md) instead. For point-in-time recovery (PITR), use [PITR](/br/br-pitr-guide.md). +> - TiDB Binlog is not compatible with some features introduced in TiDB v5.0 and they cannot be used together. For details, see [Notes](#notes). TiDB Binlog has the following features: * **Data replication:** replicate the data in the TiDB cluster to other databases * **Real-time backup and restoration:** back up the data in the TiDB cluster and restore the TiDB cluster when the cluster fails -> **Note:** -> -> - TiDB Binlog is not compatible with some features introduced in TiDB v5.0 and they cannot be used together. For details, see [Notes](#notes). -> - Starting from TiDB v7.5.0, technical support for the data replication feature of TiDB Binlog is no longer provided. 
It is strongly recommended to use [TiCDC](/ticdc/ticdc-overview.md) as an alternative solution for data replication. -> - Although TiDB v7.5.0 still supports the real-time backup and restoration feature of TiDB Binlog, this component will be completely deprecated in future versions. It is recommended to use [PITR](/br/br-pitr-guide.md) as an alternative solution for data recovery. - ## TiDB Binlog architecture The TiDB Binlog architecture is as follows: @@ -67,7 +64,7 @@ The TiDB Binlog cluster is composed of Pump and Drainer. - TiDB system variable [tidb_enable_async_commit](/system-variables.md#tidb_enable_async_commit-new-in-v50): After TiDB Binlog is enabled, performance cannot be improved by enabling this option. It is recommended to use [TiCDC](/ticdc/ticdc-overview.md) instead of TiDB Binlog. - TiDB system variable [tidb_enable_1pc](/system-variables.md#tidb_enable_1pc-new-in-v50): After TiDB Binlog is enabled, performance cannot be improved by enabling this option. It is recommended to use [TiCDC](/ticdc/ticdc-overview.md) instead of TiDB Binlog. -* Drainer supports replicating binlogs to MySQL, TiDB, Kafka or local files. If you need to replicate binlogs to other Drainer unsuppored destinations, you can set Drainer to replicate the binlog to Kafka and read the data in Kafka for customized processing according to binlog consumer protocol. See [Binlog Consumer Client User Guide](/tidb-binlog/binlog-consumer-client.md). +* Drainer supports replicating binlogs to MySQL, TiDB, Kafka or local files. If you need to replicate binlogs to other Drainer unsupported destinations, you can set Drainer to replicate the binlog to Kafka and read the data in Kafka for customized processing according to binlog consumer protocol. See [Binlog Consumer Client User Guide](/tidb-binlog/binlog-consumer-client.md). * To use TiDB Binlog for recovering incremental data, set the config `db-type` to `file` (local files in the proto buffer format). Drainer converts the binlog to data in the specified [proto buffer format](https://github.com/pingcap/tidb-binlog/blob/master/proto/pb_binlog.proto) and writes the data to local files. In this way, you can use [Reparo](/tidb-binlog/tidb-binlog-reparo.md) to recover data incrementally. diff --git a/tidb-binlog/upgrade-tidb-binlog.md b/tidb-binlog/upgrade-tidb-binlog.md index 03829113fc26e..c86b385aab11e 100644 --- a/tidb-binlog/upgrade-tidb-binlog.md +++ b/tidb-binlog/upgrade-tidb-binlog.md @@ -8,11 +8,10 @@ aliases: ['/docs/dev/tidb-binlog/upgrade-tidb-binlog/','/docs/dev/reference/tidb This document introduces how to upgrade TiDB Binlog that is deployed manually to the latest [cluster](/tidb-binlog/tidb-binlog-overview.md) version. There is also a section on how to upgrade TiDB Binlog from an earlier incompatible version (Kafka/Local version) to the latest version. -> **Note:** +> **Warning:** > +> - Starting from v7.5.0, [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) replication is deprecated. Starting from v8.3.0, TiDB Binlog is fully deprecated, with removal planned for a future release. For incremental data replication, use [TiCDC](/ticdc/ticdc-overview.md) instead. For point-in-time recovery (PITR), use [PITR](/br/br-pitr-guide.md). > - TiDB Binlog is not compatible with some features introduced in TiDB v5.0 and they cannot be used together. For details, see [Notes](/tidb-binlog/tidb-binlog-overview.md#notes). -> - Starting from TiDB v7.5.0, technical support for the data replication feature of TiDB Binlog is no longer provided. 
It is strongly recommended to use [TiCDC](/ticdc/ticdc-overview.md) as an alternative solution for data replication. -> - Although TiDB v7.5.0 still supports the real-time backup and restoration feature of TiDB Binlog, this component will be completely deprecated in future versions. It is recommended to use [PITR](/br/br-pitr-guide.md) as an alternative solution for data recovery. ## Upgrade TiDB Binlog deployed manually diff --git a/tidb-troubleshooting-map.md b/tidb-troubleshooting-map.md index 8d73f3151c494..bf5d459c29abb 100644 --- a/tidb-troubleshooting-map.md +++ b/tidb-troubleshooting-map.md @@ -400,7 +400,7 @@ Check the specific cause for busy by viewing the monitor **Grafana** -> **TiKV** ### 6.1 TiDB Binlog -- 6.1.1 TiDB Binlog is a tool that collects changes from TiDB and provides backup and replication to downstream TiDB or MySQL platforms. For details, see [TiDB Binlog on GitHub](https://github.com/pingcap/tidb-binlog). +- 6.1.1 [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) (deprecated) is a tool that collects changes from TiDB and provides backup and replication to downstream TiDB or MySQL platforms. For details, see [TiDB Binlog on GitHub](https://github.com/pingcap/tidb-binlog). - 6.1.2 The `Update Time` in Pump/Drainer Status is updated normally, and no anomaly shows in the log, but no data is written to the downstream. diff --git a/transaction-overview.md b/transaction-overview.md index e12e891f22dcf..7467137eb7d5a 100644 --- a/transaction-overview.md +++ b/transaction-overview.md @@ -299,7 +299,7 @@ TiDB previously limited the total number of key-value pairs for a single transac > **Note:** > -> Usually, TiDB Binlog is enabled to replicate data to the downstream. In some scenarios, message middleware such as Kafka is used to consume binlogs that are replicated to the downstream. +> Usually, TiDB Binlog (deprecated) is enabled to replicate data to the downstream. In some scenarios, message middleware such as Kafka is used to consume binlogs that are replicated to the downstream. > > Taking Kafka as an example, the upper limit of Kafka's single message processing capability is 1 GB. Therefore, when `txn-total-size-limit` is set to more than 1 GB, it might happen that the transaction is successfully executed in TiDB, but the downstream Kafka reports an error. To avoid this situation, you need to decide the actual value of `txn-total-size-limit` according to the limit of the end consumer. For example, if Kafka is used downstream, `txn-total-size-limit` must not exceed 1 GB. diff --git a/upgrade-tidb-using-tiup.md b/upgrade-tidb-using-tiup.md index b3069dc80abd0..19c7a4f930ad5 100644 --- a/upgrade-tidb-using-tiup.md +++ b/upgrade-tidb-using-tiup.md @@ -63,7 +63,7 @@ This document is targeted for the following upgrade paths: 2. Use TiUP (`tiup cluster`) to import the TiDB Ansible configuration. 3. Update the 3.0 version to 4.0 according to [Upgrade TiDB Using TiUP (v4.0)](https://docs.pingcap.com/tidb/v4.0/upgrade-tidb-using-tiup#import-tidb-ansible-and-the-inventoryini-configuration-to-tiup). 4. Upgrade the cluster to v8.2.0 according to this document. -- Support upgrading the versions of TiDB Binlog, TiCDC, TiFlash, and other components. +- Support upgrading the versions of TiDB Binlog (deprecated), TiCDC, TiFlash, and other components. 
- When upgrading TiFlash from versions earlier than v6.3.0 to v6.3.0 and later versions, note that the CPU must support the AVX2 instruction set under the Linux AMD64 architecture and the ARMv8 instruction set architecture under the Linux ARM64 architecture. For details, see the description in [v6.3.0 Release Notes](/releases/release-6.3.0.md#others). - For detailed compatibility changes of different versions, see the [Release Notes](/releases/release-notes.md) of each version. Modify your cluster configuration according to the "Compatibility Changes" section of the corresponding release notes. - When updating clusters from versions earlier than v5.3 to v5.3 or later versions, note that there is a time format change in the alerts generated by the default deployed Prometheus. This format change is introduced starting from Prometheus v2.27.1. For more information, see [Prometheus commit](https://github.com/prometheus/prometheus/commit/7646cbca328278585be15fa615e22f2a50b47d06). From 44c51921e2e8c97310602fcb8c61ec0ca128d74d Mon Sep 17 00:00:00 2001 From: zyguan Date: Tue, 20 Aug 2024 10:39:12 +0800 Subject: [PATCH 30/44] tidb: add doc for `batch-policy` config (#18446) --- tidb-configuration-file.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tidb-configuration-file.md b/tidb-configuration-file.md index e1371721bb2c8..5a2b29bb1c7cd 100644 --- a/tidb-configuration-file.md +++ b/tidb-configuration-file.md @@ -726,6 +726,16 @@ Configuration items related to opentracing.reporter. - Default value: `41s` - It is required to set this value larger than twice of the Raft election timeout. +### `batch-policy` New in v8.3.0 + +- Controls the batching strategy for requests from TiDB to TiKV. When sending requests to TiKV, TiDB always encapsulates the requests in the current waiting queue into a `BatchCommandsRequest` and sends it to TiKV as a packet. This is the basic batching strategy. When the TiKV load throughput is high, TiDB decides whether to wait for an additional period after the basic batching based on the value of `batch-policy`. This additional batching allows more requests to be encapsulated in a single `BatchCommandsRequest`. +- Default value: `"standard"` +- Value options: + - `"basic"`: the behavior is consistent with versions before v8.3.0, where TiDB performs additional batching only if [`tikv-client.max-batch-wait-time`](#max-batch-wait-time) is greater than 0 and the load of TiKV exceeds the value of [`tikv-client.overload-threshold`](#overload-threshold). + - `"standard"`: TiDB dynamically batches requests based on the arrival time intervals of recent requests, suitable for high-throughput scenarios. + - `"positive"`: TiDB always performs additional batching, suitable for high-throughput testing scenarios to achieve optimal performance. However, in low-load scenarios, this strategy might introduce unnecessary batching wait time, potentially reducing performance. + - `"custom{...}"`: allows customization of batching strategy parameters. This option is intended for the internal testing of TiDB and is **NOT recommended** for general use. + ### `max-batch-size` - The maximum number of RPC packets sent in batch. If the value is not `0`, the `BatchCommands` API is used to send requests to TiKV, and the RPC latency can be reduced in the case of high concurrency. It is recommended that you do not modify this value. 
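For reference, the following is a minimal sketch of how the batching strategy described in the `batch-policy` section above might be set in the TiDB configuration file. The `[tikv-client]` section name follows the configuration items referenced in that section; choose the value that matches your workload:

```toml
# Sketch of a tidb.toml fragment: select the batching strategy for requests sent to TiKV.
[tikv-client]
# "standard" (the default) batches dynamically based on recent request arrival intervals;
# "basic" keeps the pre-v8.3.0 behavior; "positive" always performs additional batching.
batch-policy = "standard"
```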
From 69fb12ab6b74fab26260ac05f798777b98aca479 Mon Sep 17 00:00:00 2001 From: yibin <13482527984@163.com> Date: Tue, 20 Aug 2024 10:48:12 +0800 Subject: [PATCH 31/44] Change tidb_opt_projection_push_down default value to ON (#18346) --- system-variables.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/system-variables.md b/system-variables.md index 2e3dfa206ea3f..b9aeee8f9adcc 100644 --- a/system-variables.md +++ b/system-variables.md @@ -4563,11 +4563,17 @@ EXPLAIN FORMAT='brief' SELECT COUNT(1) FROM t WHERE a = 1 AND b IS NOT NULL; ### tidb_opt_projection_push_down New in v6.1.0 -- Scope: SESSION +- Scope: SESSION | GLOBAL +- Persists to cluster: Yes - Applies to hint [SET_VAR](/optimizer-hints.md#set_varvar_namevar_value): Yes - Type: Boolean -- Default value: `OFF` -- Specifies whether to allow the optimizer to push `Projection` down to the TiKV or TiFlash coprocessor. +- Default value: `ON`. Before v8.3.0, the default value is `OFF`. +- Specifies whether to allow the optimizer to push the `Projection` operator down to the TiKV coprocessor. When enabled, the optimizer might push the following three types of `Projection` operators down to TiKV: + - The top-level expressions of the operator are all [JSON query functions](/functions-and-operators/json-functions/json-functions-search.md) or [JSON value attribute functions](/functions-and-operators/json-functions/json-functions-return.md). For example: `SELECT JSON_EXTRACT(data, '$.name') FROM users;`. + - The top-level expressions of the operator include a mix of JSON query functions or JSON value attribute functions, and direct column reads. For example: `SELECT JSON_DEPTH(data), name FROM users;`. + - The top-level expressions of the operator are all direct column reads, and the number of output columns is less than the number of input columns. For example: `SELECT name FROM users;`. +- The final decision to push down a `Projection` operator also depends on the optimizer's comprehensive evaluation of query cost. +- For TiDB clusters that are upgraded from a version earlier than v8.3.0 to v8.3.0 or later, the default value of this variable is `OFF`. ### tidb_opt_range_max_size New in v6.4.0 From ac5582dd28e7440a8ca4c00f4b0e43b9bc7ccc01 Mon Sep 17 00:00:00 2001 From: Wenqi Mou Date: Tue, 20 Aug 2024 01:13:41 -0400 Subject: [PATCH 32/44] br: add metrics description for snapshot restore and PITR (#18516) --- grafana-tikv-dashboard.md | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/grafana-tikv-dashboard.md b/grafana-tikv-dashboard.md index b4ef20b9e866f..801408895b7ab 100644 --- a/grafana-tikv-dashboard.md +++ b/grafana-tikv-dashboard.md @@ -495,6 +495,44 @@ This section provides a detailed description of these key metrics on the **TiKV- - Get Region Operation Count: The number of times the coordinator requests Region information from the PD - Try Advance Trigger Time: The time taken for the coordinator to attempt to advance the checkpoint +### Backup & Import + +- Import CPU Utilization: The CPU utilization aggregated by SST importer. +- Import Thread Count: The number of threads used by SST importer. +- Import Errors: The number of errors encountered during SST import. +- Import RPC Duration: The time spent on various RPC calls in SST importer. +- Import RPC Ops: The total number of RPC calls in SST importer. +- Import RPC Count: The number of RPC calls being processed by SST importer. 
+- Import Write/Download RPC Duration: The RPC time for write or download operations in SST importer. +- Import Wait Duration: The time spent waiting in queue for download task execution. +- Import Read SST Duration: The time spent reading an SST file from external storage and downloading it to TiKV. +- Import Rewrite SST Duration: The time spent rewriting the SST file based on rewrite rules. +- Import Ingest RPC Duration: The time spent handling ingest RPC requests on TiKV. +- Import Ingest SST Duration: The time spent ingesting the SST file into RocksDB. +- Import Ingest SST Bytes: The number of bytes ingested. +- Import Download SST Throughput: The SST download throughput in bytes per second. +- cloud request: The number of requests to cloud providers. + +### Point In Time Restore + +- CPU Usage: The CPU utilization by point-in-time recovery (PITR). +- P99 RPC Duration: The 99th percentile of RPC request duration. +- Import RPC Ops: The total number of RPC calls in SST importer. +- Import RPC Count: The number of RPC calls being processed by SST importer. +- Cache Events: The number of events in the file cache during SST import. +- Overall RPC Duration: The time spent on RPC calls. +- Read File into Memory Duration: The time spent downloading files from external storage and loading them into memory. +- Queuing Time: The time spent waiting to be scheduled on a thread. +- Apply Request Throughput: The rate of applying requests in bytes. +- Downloaded File Size: The size of downloaded file in bytes. +- Apply Batch Size: The number of bytes for applying to Raft store in one batch. +- Blocked by Concurrency Time: The time spent waiting for execution due to concurrency constraints. +- Apply Request Speed: The speed of applying request to Raft store. +- Cached File in Memory: The files cached by the applying requests of SST importer. +- Engine Requests Unfinished: The number of pending requests to Raft store. +- Apply Time: The time spent writing data to Raft store. +- Raft Store Memory Usage: The memory usage for Raft store. 
+ ### Explanation of Common Parameters #### gRPC Message Type From 7740943302d9f81afd8c48ca14a08ad61101f95a Mon Sep 17 00:00:00 2001 From: Grace Cai Date: Tue, 20 Aug 2024 13:30:12 +0800 Subject: [PATCH 33/44] doc: session_alias and rows_affected in information_schema.processlist (#18503) --- identify-expensive-queries.md | 4 +- .../information-schema-processlist.md | 113 +++++++++--------- releases/release-8.2.0.md | 4 + 3 files changed, 64 insertions(+), 57 deletions(-) diff --git a/identify-expensive-queries.md b/identify-expensive-queries.md index 6faf1953beea0..f0d3dff9758ad 100644 --- a/identify-expensive-queries.md +++ b/identify-expensive-queries.md @@ -15,7 +15,7 @@ TiDB allows you to identify expensive queries during SQL execution, so you can d ## Expensive query log example ```sql -[2020/02/05 15:32:25.096 +08:00] [WARN] [expensivequery.go:167] [expensive_query] [cost_time=60.008338935s] [wait_time=0s] [request_count=1] [total_keys=70] [process_keys=65] [num_cop_tasks=1] [process_avg_time=0s] [process_p90_time=0s] [process_max_time=0s] [process_max_addr=10.0.1.9:20160] [wait_avg_time=0.002s] [wait_p90_time=0.002s] [wait_max_time=0.002s] [wait_max_addr=10.0.1.9:20160] [stats=t:pseudo] [conn_id=60026] [user=root] [database=test] [table_ids="[122]"] [txn_start_ts=414420273735139329] [mem_max="1035 Bytes (1.0107421875 KB)"] [sql="insert into t select sleep(1) from t"] +[expensivequery.go:145] [expensive_query] [cost_time=60.021998785s] [cop_time=0.022540151s] [process_time=28.448316643s] [wait_time=0.045507163s] [request_count=430] [total_keys=3538276] [process_keys=3537846] [num_cop_tasks=430] [process_avg_time=0.066158875s] [process_p90_time=0.140427865s] [process_max_time=0.27903656s] [process_max_addr=tikv-1-peer:20160] [wait_avg_time=0.00010583s] [wait_p90_time=0.000358794s] [wait_max_time=0.001218721s] [wait_max_addr=tikv-1-peer:20160] [stats=usertable:451469035823955972] [conn=1621098504] [user=root] [database=test] [table_ids="[104]"] [txn_start_ts=451469037501677571] [mem_max="621043469 Bytes (592.3 MB)"] [sql="insert /*+ SET_VAR(tidb_dml_type=bulk) */ into usertable_2 select * from usertable limit 5000000"] [session_alias=] ["affected rows"=3505282]] ``` ## Fields description @@ -27,6 +27,8 @@ Basic fields: * `table_ids`: The IDs of the tables involved in a statement. * `txn_start_ts`: The start timestamp and the unique ID of a transaction. You can use this value to search for the transaction-related logs. * `sql`: The sql statement. +* `session_alias`: The alias of the current session. +* `affected rows`: The number of rows currently affected by the statement. Memory usage related fields: diff --git a/information-schema/information-schema-processlist.md b/information-schema/information-schema-processlist.md index 7d228fe385e2a..b9029237eb7f9 100644 --- a/information-schema/information-schema-processlist.md +++ b/information-schema/information-schema-processlist.md @@ -14,6 +14,8 @@ The `PROCESSLIST` table has additional columns not present in `SHOW PROCESSLIST` * A `DISK` column to show the disk usage in bytes. * A `TxnStart` column to show the start time of the transaction. * A `RESOURCE_GROUP` column to show the resource group name. +* A `SESSION_ALIAS` column to show the alias of the current session. +* A `ROWS_AFFECTED` column to show the number of rows currently affected by the statement. 
```sql USE information_schema; @@ -21,63 +23,67 @@ DESC processlist; ``` ```sql -+---------------------+---------------------+------+------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+---------------------+---------------------+------+------+---------+-------+ -| ID | bigint(21) unsigned | NO | | 0 | | -| USER | varchar(16) | NO | | | | -| HOST | varchar(64) | NO | | | | -| DB | varchar(64) | YES | | NULL | | -| COMMAND | varchar(16) | NO | | | | -| TIME | int(7) | NO | | 0 | | -| STATE | varchar(7) | YES | | NULL | | -| INFO | longtext | YES | | NULL | | -| DIGEST | varchar(64) | YES | | | | -| MEM | bigint(21) unsigned | YES | | NULL | | -| DISK | bigint(21) unsigned | YES | | NULL | | -| TxnStart | varchar(64) | NO | | | | -| RESOURCE_GROUP | varchar(32) | NO | | | | -+---------------------+---------------------+------+------+---------+-------+ -13 rows in set (0.00 sec) ++----------------+---------------------+------+------+---------+-------+ +| Field | Type | Null | Key | Default | Extra | ++----------------+---------------------+------+------+---------+-------+ +| ID | bigint(21) unsigned | NO | | 0 | | +| USER | varchar(16) | NO | | | | +| HOST | varchar(64) | NO | | | | +| DB | varchar(64) | YES | | NULL | | +| COMMAND | varchar(16) | NO | | | | +| TIME | int(7) | NO | | 0 | | +| STATE | varchar(7) | YES | | NULL | | +| INFO | longtext | YES | | NULL | | +| DIGEST | varchar(64) | YES | | | | +| MEM | bigint(21) unsigned | YES | | NULL | | +| DISK | bigint(21) unsigned | YES | | NULL | | +| TxnStart | varchar(64) | NO | | | | +| RESOURCE_GROUP | varchar(32) | NO | | | | +| SESSION_ALIAS | varchar(64) | NO | | | | +| ROWS_AFFECTED | bigint(21) unsigned | YES | | NULL | | ++----------------+---------------------+------+------+---------+-------+ ``` ```sql -SELECT * FROM processlist\G +SELECT * FROM information_schema.processlist\G ``` ```sql *************************** 1. row *************************** - ID: 2300033189772525975 - USER: root - HOST: 127.0.0.1:51289 - DB: NULL - COMMAND: Query - TIME: 0 - STATE: autocommit - INFO: SELECT * FROM processlist - DIGEST: dbfaa16980ec628011029f0aaf0d160f4b040885240dfc567bf760d96d374f7e - MEM: 0 - DISK: 0 - TxnStart: - RESOURCE_GROUP: rg1 -1 row in set (0.00 sec) + ID: 1268776964 + USER: root + HOST: 127.0.0.1:59922 + DB: NULL + COMMAND: Query + TIME: 0 + STATE: autocommit + INFO: SELECT * FROM information_schema.processlist + DIGEST: 4b5e7cdd5d3ed84d6c1a6d56403a3d512554b534313caf296268abdec1c9ea99 + MEM: 0 + DISK: 0 + TxnStart: +RESOURCE_GROUP: default + SESSION_ALIAS: + ROWS_AFFECTED: 0 ``` Fields in the `PROCESSLIST` table are described as follows: -* ID: The ID of the user connection. -* USER: The name of the user who is executing `PROCESS`. -* HOST: The address that the user is connecting to. -* DB: The name of the currently connected default database. -* COMMAND: The command type that `PROCESS` is executing. -* TIME: The current execution duration of `PROCESS`, in seconds. -* STATE: The current connection state. -* INFO: The requested statement that is being processed. -* DIGEST: The digest of the SQL statement. -* MEM: The memory used by the request that is being processed, in bytes. -* DISK: The disk usage in bytes. -* TxnStart: The start time of the transaction. -* RESOURCE_GROUP: The resource group name. +* `ID`: The ID of the user connection. +* `USER`: The name of the user who is executing `PROCESS`. +* `HOST`: The address that the user is connecting to. 
+* `DB`: The name of the currently connected default database. +* `COMMAND`: The command type that `PROCESS` is executing. +* `TIME`: The current execution duration of `PROCESS`, in seconds. +* `STATE`: The current connection state. +* `INFO`: The requested statement that is being processed. +* `DIGEST`: The digest of the SQL statement. +* `MEM`: The memory used by the request that is being processed, in bytes. +* `DISK`: The disk usage in bytes. +* `TxnStart`: The start time of the transaction. +* `RESOURCE_GROUP`: The resource group name. +* `SESSION_ALIAS`: The alias of the current session. +* `ROWS_AFFECTED`: The number of rows currently affected by the statement. ## CLUSTER_PROCESSLIST @@ -88,14 +94,9 @@ SELECT * FROM information_schema.cluster_processlist; ``` ```sql -+-----------------+-----+------+----------+------+---------+------+------------+------------------------------------------------------+-----+----------------------------------------+----------------+ -| INSTANCE | ID | USER | HOST | DB | COMMAND | TIME | STATE | INFO | MEM | TxnStart | RESOURCE_GROUP | -+-----------------+-----+------+----------+------+---------+------+------------+------------------------------------------------------+-----+----------------------------------------+----------------+ - -| 10.0.1.22:10080 | 150 | u1 | 10.0.1.1 | test | Query | 0 | autocommit | select count(*) from usertable | 372 | 05-28 03:54:21.230(416976223923077223) | default | -| 10.0.1.22:10080 | 138 | root | 10.0.1.1 | test | Query | 0 | autocommit | SELECT * FROM information_schema.cluster_processlist | 0 | 05-28 03:54:21.230(416976223923077220) | rg1 | -| 10.0.1.22:10080 | 151 | u1 | 10.0.1.1 | test | Query | 0 | autocommit | select count(*) from usertable | 372 | 05-28 03:54:21.230(416976223923077224) | rg2 | -| 10.0.1.21:10080 | 15 | u2 | 10.0.1.1 | test | Query | 0 | autocommit | select max(field0) from usertable | 496 | 05-28 03:54:21.230(416976223923077222) | default | -| 10.0.1.21:10080 | 14 | u2 | 10.0.1.1 | test | Query | 0 | autocommit | select max(field0) from usertable | 496 | 05-28 03:54:21.230(416976223923077225) | default | -+-----------------+-----+------+----------+------+---------+------+------------+------------------------------------------------------+-----+----------------------------------------+----------------+ ++-----------------+------------+------+-----------------+------+---------+------+------------+------------------------------------------------------+------------------------------------------------------------------+------+------+----------------------------------------+----------------+---------------+---------------+ +| INSTANCE | ID | USER | HOST | DB | COMMAND | TIME | STATE | INFO | DIGEST | MEM | DISK | TxnStart | RESOURCE_GROUP | SESSION_ALIAS | ROWS_AFFECTED | ++-----------------+------------+------+-----------------+------+---------+------+------------+------------------------------------------------------+------------------------------------------------------------------+------+------+----------------------------------------+----------------+---------------+---------------+ +| 127.0.0.1:10080 | 1268776964 | root | 127.0.0.1:59922 | NULL | Query | 0 | autocommit | SELECT * FROM information_schema.cluster_processlist | b1e38e59fbbc3e2b35546db5c8053040db989a497ac6cd71ff8dd4394395701a | 0 | 0 | 07-29 12:39:24.282(451471727468740609) | default | | 0 | 
++-----------------+------------+------+-----------------+------+---------+------+------------+------------------------------------------------------+------------------------------------------------------------------+------+------+----------------------------------------+----------------+---------------+---------------+ ``` diff --git a/releases/release-8.2.0.md b/releases/release-8.2.0.md index 9b126daa7a69d..8426f56ddaf68 100644 --- a/releases/release-8.2.0.md +++ b/releases/release-8.2.0.md @@ -192,6 +192,10 @@ Quick access: [Quick start](https://docs.pingcap.com/tidb/v8.2/quick-start-with- | TiKV | [`server.grpc-compression-type`](/tikv-configuration-file.md#grpc-compression-type) | Modified | This configuration item now also controls the compression algorithm of response messages sent from TiKV to TiDB. Enabling compression might consume more CPU resources. | | TiFlash | [`security.redact_info_log`](/tiflash/tiflash-configuration.md#configure-the-tiflashtoml-file) | Modified | Introduces a new value option `marker`. When you set the value to `marker`, all user data in the log is wrapped in `‹ ›`. | +### System tables + +* The [`INFORMATION_SCHEMA.PROCESSLIST`](/information-schema/information-schema-processlist.md) and [`INFORMATION_SCHEMA.CLUSTER_PROCESSLIST`](/information-schema/information-schema-processlist.md#cluster_processlist) system tables add the `SESSION_ALIAS` field to show the alias of the current session. [#46889](https://github.com/pingcap/tidb/issues/46889) @[lcwangchao](https://github.com/lcwangchao) + ### Compiler versions * To improve the TiFlash development experience, the minimum version of LLVM required to compile and build TiDB has been upgraded from 13.0 to 17.0. If you are a TiDB developer, you need to upgrade the version of your LLVM compiler to ensure a smooth build. [#7193](https://github.com/pingcap/tiflash/issues/7193) @[Lloyd-Pottiger](https://github.com/Lloyd-Pottiger) From 08a3a171cc3badf0c71dc3c61692cab64dfe53d5 Mon Sep 17 00:00:00 2001 From: dbsid Date: Tue, 20 Aug 2024 15:07:42 +0800 Subject: [PATCH 34/44] fix bug in performance tuning practices (#18615) --- performance-tuning-practices.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/performance-tuning-practices.md b/performance-tuning-practices.md index 13a52c0db9730..4e6e77272d755 100644 --- a/performance-tuning-practices.md +++ b/performance-tuning-practices.md @@ -427,7 +427,7 @@ By comparing the performance of each scenario, we can draw the following conclus - TiDB is compatible with different commands of the MySQL protocol. When using the Prepared Statement interface and setting the following JDBC connection parameters, the application can achieve its best performance: ``` - useServerPrepStmts=true&cachePrepStmts=true&prepStmtCacheSize=1000&prepStmtCacheSqlLimit=20480&useConfigs= maxPerformance + useServerPrepStmts=true&cachePrepStmts=true&prepStmtCacheSize=1000&prepStmtCacheSqlLimit=20480&useConfigs=maxPerformance ``` - It is recommended that you use TiDB Dashboard (for example, the Top SQL feature and Continuous Profiling feature) and Performance Overview dashboard for performance analysis and tuning. 
From c1e99adc0c4d8bd438695dc1999a6b5b87af0a01 Mon Sep 17 00:00:00 2001 From: Grace Cai Date: Tue, 20 Aug 2024 15:57:12 +0800 Subject: [PATCH 35/44] PD: add description of batch in evict-leader-scheduler config (#18625) --- pd-control.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pd-control.md b/pd-control.md index daf4bcbd6e2d7..44046e03a932b 100644 --- a/pd-control.md +++ b/pd-control.md @@ -979,6 +979,12 @@ Use this command to view and manage the configuration of the `evict-leader-sched If all store configurations of an `evict-leader-scheduler` are removed, the scheduler itself is automatically removed. +- When an `evict-leader-scheduler` already exists, use the `set batch` subcommand to modify the `batch` value. `batch` controls the number of Operators generated during a single scheduling process. The default value is `3`, and the range is `[1, 10]`. The larger the `batch` value, the faster the scheduling speed. + + ```bash + scheduler config evict-leader-scheduler set batch 10 // Set the batch value to 10 + ``` + ### `service-gc-safepoint` Use this command to query the current GC safepoint and service GC safepoint. The output is as follows: From da5669a1588ae37004527a15f997b64c3b074cc2 Mon Sep 17 00:00:00 2001 From: Lilian Lee Date: Tue, 20 Aug 2024 17:01:42 +0800 Subject: [PATCH 36/44] Refine slow log description (#18366) --- system-variables.md | 2 +- tidb-configuration-file.md | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/system-variables.md b/system-variables.md index b9aeee8f9adcc..91fda26c2be14 100644 --- a/system-variables.md +++ b/system-variables.md @@ -5347,7 +5347,7 @@ Query OK, 0 rows affected, 1 warning (0.00 sec) - Default value: `300` - Range: `[-1, 9223372036854775807]` - Unit: Milliseconds -- This variable is used to output the threshold value of the time consumed by the slow log. When the time consumed by a query is larger than this value, this query is considered as a slow log and its log is output to the slow query log. +- This variable outputs the threshold value of the time consumed by the slow log, and is set to 300 milliseconds by default. When the time consumed by a query is larger than this value, this query is considered as a slow query and its log is output to the slow query log. Note that when the output level of [`log.level`](https://docs.pingcap.com/tidb/dev/tidb-configuration-file#level) is `"debug"`, all queries are recorded in the slow query log, regardless of the setting of this variable. ### tidb_slow_query_file diff --git a/tidb-configuration-file.md b/tidb-configuration-file.md index 5a2b29bb1c7cd..59b77a8a20a10 100644 --- a/tidb-configuration-file.md +++ b/tidb-configuration-file.md @@ -309,7 +309,7 @@ Configuration items related to log. - Outputs the threshold value of consumed time in the slow log. - Default value: `300` - Unit: Milliseconds -- If the value in a query is larger than the default value, it is a slow query and is output to the slow log. +- When the time consumed by a query is larger than this value, this query is considered as a slow query and its log is output to the slow query log. Note that when the output level of [`log.level`](#level) is `"debug"`, all queries are recorded in the slow query log, regardless of the setting of this parameter. 
- Since v6.1.0, the threshold value of consumed time in the slow log is specified by the TiDB configuration item [`instance.tidb_slow_log_threshold`](/tidb-configuration-file.md#tidb_slow_log_threshold) or the system variable [`tidb_slow_log_threshold`](/system-variables.md#tidb_slow_log_threshold). `slow-threshold` still takes effect. But if `slow-threshold` and `instance.tidb_slow_log_threshold` are set at the same time, the latter takes effect. ### `record-plan-in-slow-log` @@ -914,10 +914,11 @@ Configuration items related to read isolation. ### `tidb_slow_log_threshold` -- This configuration is used to output the threshold value of the time consumed by the slow log. When the time consumed by a query is larger than this value, this query is considered as a slow log and its log is output to the slow query log. +- Outputs the threshold value of the time consumed by the slow log. - Default value: `300` - Range: `[-1, 9223372036854775807]` - Unit: Milliseconds +- When the time consumed by a query is larger than this value, this query is considered as a slow query and its log is output to the slow query log. Note that when the output level of [`log.level`](#level) is `"debug"`, all queries are recorded in the slow query log, regardless of the setting of this parameter. - Before v6.1.0, this configuration is set by `slow-threshold`. ### `in-mem-slow-query-topn-num` New in v7.3.0 From cce45fe1b9627650ca37cc0822e263e696042442 Mon Sep 17 00:00:00 2001 From: Grace Cai Date: Wed, 21 Aug 2024 10:46:42 +0800 Subject: [PATCH 37/44] br: docs on restore disk protection (#18502) --- br/br-snapshot-guide.md | 1 + 1 file changed, 1 insertion(+) diff --git a/br/br-snapshot-guide.md b/br/br-snapshot-guide.md index 01e091b237179..cbb90029054ca 100644 --- a/br/br-snapshot-guide.md +++ b/br/br-snapshot-guide.md @@ -68,6 +68,7 @@ The output is as follows, corresponding to the physical time `2022-09-08 13:30:0 > - Starting from BR v7.6.0, to address potential restore bottlenecks in scenarios with large-scale Regions, BR supports accelerating restore through the coarse-grained Region scattering algorithm (experimental). You can enable this feature by specifying the command-line parameter `--granularity="coarse-grained"`. > - Starting from BR v8.0.0, the snapshot restore through the coarse-grained Region scattering algorithm is generally available (GA) and enabled by default. BR improves the snapshot restore speed significantly by implementing various optimizations such as adopting the coarse-grained Region scattering algorithm, creating databases and tables in batches, reducing the mutual impact between SST file downloads and ingest operations, and accelerating the restore of table statistics. According to test results from real-world cases, the SST file download speed for snapshot restore is improved by approximately up to 10 times, the data restore speed per TiKV node stabilizes at 1.2 GiB/s, the end-to-end restore speed is improved by approximately 1.5 to 3 times, and 100 TiB of data can be restored within one hour. > - Starting from BR v8.2.0, the command line parameter `--granularity` is deprecated, and the coarse-grained Region scattering algorithm is enabled by default. 
+> - Starting from BR v8.3.0, the snapshot restore task introduces available disk space checks for TiKV and TiFlash: at the beginning of the task, BR verifies whether TiKV and TiFlash have sufficient disk space based on the size of SST files to be restored; for TiKV v8.3.0 or later version, TiKV verifies whether it has sufficient disk space before downloading each SST file. If the space is insufficient according to any of these checks, the restore task fails with an error. You can skip the check at the beginning of the restore task by setting `--check-requirements=false`, but the disk space check before TiKV downloads each SST file cannot be skipped. You can restore a snapshot backup by running the `tiup br restore full` command. Run `tiup br restore full --help` to see the help information: From ae8f3cc041735c37931c2d0014d06e7dabf112d6 Mon Sep 17 00:00:00 2001 From: ekexium Date: Wed, 21 Aug 2024 10:52:42 +0800 Subject: [PATCH 38/44] doc: concurrency of gc delete-range (#18359) --- system-variables.md | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/system-variables.md b/system-variables.md index 91fda26c2be14..fd12bdf445bc4 100644 --- a/system-variables.md +++ b/system-variables.md @@ -3007,9 +3007,14 @@ For a system upgraded to v5.0 from an earlier version, if you have not modified - Applies to hint [SET_VAR](/optimizer-hints.md#set_varvar_namevar_value): No - Type: Integer - Default value: `-1` -- Range: `[1, 256]` +- Range: `-1` or `[1, 256]` - Unit: Threads -- Specifies the number of threads in the [Resolve Locks](/garbage-collection-overview.md#resolve-locks) step of GC. A value of `-1` means that TiDB will automatically decide the number of garbage collection threads to use. +- This variable controls the number of concurrent threads during the [Resolve Locks](/garbage-collection-overview.md#resolve-locks) step of the [Garbage Collection (GC)](/garbage-collection-overview.md) process. +- Starting from v8.3.0, this variable also controls the number of concurrent threads during the [Delete Ranges](/garbage-collection-overview.md#delete-ranges) step of the GC process. +- By default, this variable is `-1`, allowing TiDB to automatically determine the appropriate number of threads based on workloads. +- When this variable is set to a number in the range of `[1, 256]`: + - Resolve Locks directly uses the value set for this variable as the number of threads. + - Delete Range uses one-fourth of the value set for this variable as the number of threads. ### tidb_gc_enable New in v5.0 From dd95725bea7578e7d50a78caeee6f87f23e27f42 Mon Sep 17 00:00:00 2001 From: Hu# Date: Thu, 22 Aug 2024 08:12:12 +0800 Subject: [PATCH 39/44] pd: Fix the default name of the configuration (#18653) --- command-line-flags-for-pd-configuration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command-line-flags-for-pd-configuration.md b/command-line-flags-for-pd-configuration.md index c12a03b8a3bf2..033bf92d744e1 100644 --- a/command-line-flags-for-pd-configuration.md +++ b/command-line-flags-for-pd-configuration.md @@ -83,7 +83,7 @@ PD is configurable using command-line flags and environment variables. ## `--name` - The human-readable unique name for this PD member -- Default: `"pd"` +- Default: `"pd-${hostname}"` - If you want to start multiply PDs, you must use different name for each one. 
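For illustration, a hypothetical sketch of starting two PD instances with distinct `--name` values; other required flags, such as the initial cluster configuration, are omitted for brevity:

```bash
# Each PD member gets a unique, human-readable name.
pd-server --name="pd-1" --data-dir="/data/pd-1" --client-urls="http://10.0.1.11:2379" --peer-urls="http://10.0.1.11:2380"
pd-server --name="pd-2" --data-dir="/data/pd-2" --client-urls="http://10.0.1.12:2379" --peer-urls="http://10.0.1.12:2380"
```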
## `--cacert` From 2ae056742aad14b8c7b3a59a7ce94a22410e058a Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Thu, 22 Aug 2024 10:40:42 +0800 Subject: [PATCH 40/44] add v8.3.0 release notes (#18386) --- TOC.md | 2 +- releases/release-8.3.0.md | 436 +++++++++++++++++++++++++++++++++++ releases/release-notes.md | 6 +- releases/release-timeline.md | 1 + 4 files changed, 443 insertions(+), 2 deletions(-) create mode 100644 releases/release-8.3.0.md diff --git a/TOC.md b/TOC.md index 4b281c3706fa3..e614aa4d3d8ea 100644 --- a/TOC.md +++ b/TOC.md @@ -4,7 +4,7 @@ - [Docs Home](https://docs.pingcap.com/) - About TiDB - [TiDB Introduction](/overview.md) - - [TiDB 8.2 Release Notes](/releases/release-8.2.0.md) + - [TiDB 8.3 Release Notes](/releases/release-8.3.0.md) - [Features](/basic-features.md) - [MySQL Compatibility](/mysql-compatibility.md) - [TiDB Limitations](/tidb-limitations.md) diff --git a/releases/release-8.3.0.md b/releases/release-8.3.0.md new file mode 100644 index 0000000000000..6eb5b55c5f395 --- /dev/null +++ b/releases/release-8.3.0.md @@ -0,0 +1,436 @@ +--- +title: TiDB 8.3.0 Release Notes +summary: Learn about the new features, compatibility changes, improvements, and bug fixes in TiDB 8.3.0. +--- + +# TiDB 8.3.0 Release Notes + +Release date: August 22, 2024 + +TiDB version: 8.3.0 + +Quick access: [Quick start](https://docs.pingcap.com/tidb/v8.3/quick-start-with-tidb) + +8.3.0 introduces the following key features and improvements: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Category | Feature/Enhancement | Description |
|----------|---------------------|-------------|
| Scalability and Performance | Global indexes for partitioned tables (experimental) | Global indexes can effectively improve the efficiency of retrieving non-partitioned columns, and remove the restriction that a unique key must contain the partition key. This feature extends the usage scenarios of TiDB partitioned tables and avoids some of the application modification work that might be required for data migration. |
| Scalability and Performance | Default pushdown of the Projection operator to the storage engine | Pushing the Projection operator down to the storage engine can distribute the load across storage nodes while reducing data transfer between nodes. This optimization helps to reduce the execution time for certain SQL queries and improves the overall database performance. |
| Scalability and Performance | Ignoring unnecessary columns when collecting statistics | Under the premise of ensuring that the optimizer can obtain the necessary information, TiDB speeds up statistics collection, improves the timeliness of statistics, and thus ensures that the optimal execution plan is selected, improving the performance of the cluster. Meanwhile, TiDB also reduces the system overhead and improves the resource utilization. |
| Reliability and Availability | Built-in virtual IP management in TiProxy | TiProxy introduces built-in virtual IP management. When configured, it supports automatic virtual IP switching without relying on external platforms or tools. This feature simplifies TiProxy deployment and reduces the complexity of the database access layer. |
+ +## Feature details + +### Performance + +* The optimizer allows pushing the `Projection` operator down to the storage engine by default [#51876](https://github.com/pingcap/tidb/issues/51876) @[yibin87](https://github.com/yibin87) + + Pushing the `Projection` operator down to the storage engine reduces data transfer between the compute engine and the storage engine, thereby improving SQL execution performance. This is particularly effective for queries containing [JSON query functions](/functions-and-operators/json-functions/json-functions-search.md) or [JSON value attribute functions](/functions-and-operators/json-functions/json-functions-return.md). Starting from v8.3.0, TiDB enables the `Projection` operator pushdown feature by default, by changing the default value of the system variable controlling this feature, [`tidb_opt_projection_push_down`](/system-variables.md#tidb_opt_projection_push_down-new-in-v610), from `OFF` to `ON`. When this feature is enabled, the optimizer automatically pushes eligible JSON query functions and JSON value attribute functions down to the storage engine. + + For more information, see [documentation](/system-variables.md#tidb_opt_projection_push_down-new-in-v610). + +* Optimize batch processing strategy for KV (key-value) requests [#55206](https://github.com/pingcap/tidb/issues/55206) @[zyguan](https://github.com/zyguan) + + TiDB fetches data by sending KV requests to TiKV. Batching and processing KV requests in bulk can significantly improve execution performance. Before v8.3.0, the batching strategy in TiDB is less efficient. Starting from v8.3.0, TiDB introduces several more efficient batching strategies in addition to the existing one. You can configure different batching strategies using the [`tikv-client.batch-policy`](/tidb-configuration-file.md#batch-policy-new-in-v830) configuration item to accommodate various workloads. + + For more information, see [documentation](/tidb-configuration-file.md#batch-policy-new-in-v830). + +* TiFlash introduces HashAgg aggregation calculation modes to improve the performance for high NDV data [#9196](https://github.com/pingcap/tiflash/issues/9196) @[guo-shaoge](https://github.com/guo-shaoge) + + Before v8.3.0, TiFlash has low aggregation calculation efficiency during the first stage of HashAgg aggregation when handling data with high NDV (number of distinct values). Starting from v8.3.0, TiFlash introduces multiple HashAgg aggregation calculation modes to improve the aggregation performance for different data characteristics. To choose a desired HashAgg aggregation calculation mode, you can configure the [`tiflash_hashagg_preaggregation_mode`](/system-variables.md#tiflash_hashagg_preaggregation_mode-new-in-v830) system variable. + + For more information, see [documentation](/system-variables.md#tiflash_hashagg_preaggregation_mode-new-in-v830). + +* Ignore unnecessary columns when collecting statistics [#53567](https://github.com/pingcap/tidb/issues/53567) @[hi-rustin](https://github.com/hi-rustin) + + When the optimizer generates an execution plan, it only needs statistics for some columns, such as columns in the filter conditions, columns in the join keys, and columns used for aggregation. Starting from v8.3.0, TiDB continuously observes the historical records of the columns used in SQL statements. By default, TiDB only collects statistics for columns with indexes and columns that are observed to require statistics collection. 
This speeds up the collection of statistics and avoids unnecessary resource consumption. + + When you upgrade your cluster from a version earlier than v8.3.0 to v8.3.0 or later, TiDB retains the original behavior by default, that is, collecting statistics for all columns. To enable this feature, you need to manually set the system variable [`tidb_analyze_column_options`](/system-variables.md#tidb_analyze_column_options-new-in-v830) to `PREDICATE`. For newly deployed clusters, this feature is enabled by default. + + For analytical systems with many random queries, you can set the system variable [`tidb_analyze_column_options`](/system-variables.md#tidb_analyze_column_options-new-in-v830) to `ALL` to collect statistics for all columns, to ensure the performance of random queries. For other types of systems, it is recommended to keep the default setting (`PREDICATE`) of [`tidb_analyze_column_options`](/system-variables.md#tidb_analyze_column_options-new-in-v830) to collect statistics for only necessary columns. + + For more information, see [documentation](/statistics.md#collect-statistics-on-some-columns). + +* Improve the query performance of some system tables [#50305](https://github.com/pingcap/tidb/issues/50305) @[tangenta](https://github.com/tangenta) + + In previous versions, querying system tables has poor performance when the cluster size becomes large and there are a large number of tables. + + In v8.0.0, query performance is optimized for the following four system tables: + + - `INFORMATION_SCHEMA.TABLES` + - `INFORMATION_SCHEMA.STATISTICS` + - `INFORMATION_SCHEMA.KEY_COLUMN_USAGE` + - `INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS` + + In v8.3.0, the query performance is optimized for the following system tables, bringing a multi-fold performance improvement compared to v8.2.0. + + - `INFORMATION_SCHEMA.CHECK_CONSTRAINTS` + - `INFORMATION_SCHEMA.COLUMNS` + - `INFORMATION_SCHEMA.PARTITIONS` + - `INFORMATION_SCHEMA.SCHEMATA` + - `INFORMATION_SCHEMA.SEQUENCES` + - `INFORMATION_SCHEMA.TABLE_CONSTRAINTS` + - `INFORMATION_SCHEMA.TIDB_CHECK_CONSTRAINTS` + - `INFORMATION_SCHEMA.TiDB_INDEXES` + - `INFORMATION_SCHEMA.TIDB_INDEX_USAGE` + - `INFORMATION_SCHEMA.VIEWS` + +* Support partition pruning when partition expressions use the `EXTRACT(YEAR_MONTH...)` function to improve query performance [#54209](https://github.com/pingcap/tidb/pull/54209) @[mjonss](https://github.com/mjonss) + + In previous versions, when partition expressions use the `EXTRACT(YEAR_MONTH...)` function, partition pruning is not supported, resulting in poor query performance. Starting from v8.3.0, partition pruning is supported when partition expressions use the `EXTRACT(YEAR_MONTH...)` function, which improves query performance. + + For more information, see [documentation](/partition-pruning.md#scenario-three). + +* Improve the performance of `CREATE TABLE` by 1.4 times, `CREATE DATABASE` by 2.1 times, and `ADD COLUMN` by 2 times [#54436](https://github.com/pingcap/tidb/issues/54436) @[D3Hunter](https://github.com/D3Hunter) + + TiDB v8.0.0 introduces the system variable [`tidb_enable_fast_create_table`](/system-variables.md#tidb_enable_fast_create_table-new-in-v800) to improve table creation performance in batch table creation scenarios. In v8.3.0, when submitting the DDL statements for table creation concurrently through 10 sessions in a single database, the performance is improved by 1.4 times compared with v8.2.0. 
+
    In v8.3.0, the performance of general DDLs in batch execution has improved compared to v8.2.0. The performance of `CREATE DATABASE` for 10 sessions concurrently improves by 19 times compared with v8.1.0 and 2.1 times compared with v8.2.0. The performance of using 10 sessions to add columns (`ADD COLUMN`) to multiple tables in the same database in batch has improved by 10 times compared with v8.1.0, and 2 times compared with v8.2.0.

    For more information, see [documentation](/system-variables.md#tidb_enable_fast_create_table-new-in-v800).

* Partitioned tables support global indexes (experimental) [#45133](https://github.com/pingcap/tidb/issues/45133) @[mjonss](https://github.com/mjonss) @[Defined2014](https://github.com/Defined2014) @[jiyfhust](https://github.com/jiyfhust) @[L-maple](https://github.com/L-maple)

    In previous versions of partitioned tables, some limitations exist because global indexes are not supported. For example, the unique key must use every column in the table's partitioning expression. If the query condition does not use the partitioning key, the query will scan all partitions, resulting in poor performance. Starting from v7.6.0, the system variable [`tidb_enable_global_index`](/system-variables.md#tidb_enable_global_index-new-in-v760) is introduced to enable the global index feature. However, this feature was still under development at that time, so it was not recommended to enable it.

    Starting from v8.3.0, the global index feature is released as an experimental feature. You can explicitly create a global index for a partitioned table with the keyword `Global` to remove the restriction that the unique key must use every column in the table's partitioning expression, to meet flexible business needs. Global indexes also enhance the performance of queries that do not include partition keys.

    For more information, see [documentation](/partitioned-table.md#global-indexes).

### Reliability

* Support streaming cursor result sets (experimental) [#54526](https://github.com/pingcap/tidb/issues/54526) @[YangKeao](https://github.com/YangKeao)

    When the application code retrieves the result set using [Cursor Fetch](/develop/dev-guide-connection-parameters.md#use-streamingresult-to-get-the-execution-result), TiDB usually first stores the complete result set in memory, and then returns the data to the client in batches. If the result set is too large, TiDB might temporarily write the result to the hard disk.

    Starting from v8.3.0, if you set the system variable [`tidb_enable_lazy_cursor_fetch`](/system-variables.md#tidb_enable_lazy_cursor_fetch-new-in-v830) to `ON`, TiDB no longer reads all data to the TiDB node, but gradually reads data to the TiDB node as the client reads. When TiDB processes large result sets, this feature reduces the memory usage of the TiDB node and improves the stability of the cluster.

    For more information, see [documentation](/system-variables.md#tidb_enable_lazy_cursor_fetch-new-in-v830).

* Enhance SQL execution plan binding [#55280](https://github.com/pingcap/tidb/issues/55280) [#55343](https://github.com/pingcap/tidb/issues/55343) @[time-and-fate](https://github.com/time-and-fate)

    In OLTP scenarios, the optimal execution plan for most SQL statements is fixed.
Implementing SQL execution plan binding for important SQL statements in the application can reduce the probability of the execution plan becoming worse and improve system stability. To meet the requirements of creating a large number of SQL execution plan bindings, TiDB enhances the capability and experience of SQL binding, including: + + - Use a single SQL statement to create SQL execution plan bindings from multiple historical execution plans to improve the efficiency of creating bindings. + - The SQL execution plan binding supports more optimizer hints, and optimizes the conversion method for complex execution plans, making the binding more stable in restoring the execution plan. + + For more information, see [documentation](/sql-plan-management.md). + +### Availability + +* TiProxy supports built-in virtual IP management [#583](https://github.com/pingcap/tiproxy/issues/583) @[djshow832](https://github.com/djshow832) + + Before v8.3.0, when using primary-secondary mode for high availability, TiProxy requires an additional component to manage the virtual IP address. Starting from v8.3.0, TiProxy supports built-in virtual IP management. In primary-secondary mode, when a primary node fails over, the new primary node will automatically bind to the specified virtual IP, ensuring that clients can always connect to an available TiProxy through the virtual IP. + + To enable virtual IP management, specify the virtual IP address using the TiProxy configuration item [`ha.virtual-ip`](/tiproxy/tiproxy-configuration.md#virtual-ip) and specify the network interface to bind the virtual IP to using [`ha.interface`](/tiproxy/tiproxy-configuration.md#interface). The virtual IP will be bound to a TiProxy instance only when both of these configuration items are set. + + For more information, see [documentation](/tiproxy/tiproxy-overview.md). + +### SQL + +* Support upgrading `SELECT LOCK IN SHARE MODE` to exclusive locks [#54999](https://github.com/pingcap/tidb/issues/54999) @[cfzjywxk](https://github.com/cfzjywxk) + + TiDB does not support `SELECT LOCK IN SHARE MODE` yet. Starting from v8.3.0, TiDB supports upgrading `SELECT LOCK IN SHARE MODE` to exclusive locks to enable support for `SELECT LOCK IN SHARE MODE`. You can control whether to enable this feature by using the new system variable [`tidb_enable_shared_lock_promotion`](/system-variables.md#tidb_enable_shared_lock_promotion-new-in-v830). + + For more information, see [documentation](/system-variables.md#tidb_enable_shared_lock_promotion-new-in-v830). + +### Observability + +* Show the progress of loading initial statistics [#53564](https://github.com/pingcap/tidb/issues/53564) @[hawkingrei](https://github.com/hawkingrei) + + TiDB loads basic statistics when it starts. In scenarios with many tables or partitions, this process can take a long time. When the configuration item [`force-init-stats`](/tidb-configuration-file.md#force-init-stats-new-in-v657-and-v710) is set to `ON`, TiDB does not provide services until the initial statistics are loaded. In this case, you need to observe the loading process to estimate the service start time. + + Starting from v8.3.0, TiDB prints the progress of loading initial statistics in stages in the log, so you can understand the running status. To provide formatted results to external tools, TiDB adds the additional [monitoring API](/tidb-monitoring-api.md) so you can obtain the progress of loading initial statistics at any time during the startup phase. 
+ +* Add metrics about Request Unit (RU) settings [#8444](https://github.com/tikv/pd/issues/8444) @[nolouch](https://github.com/nolouch) + +### Security + +* Enhance PD log redaction [#8305](https://github.com/tikv/pd/issues/8305) @[JmPotato](https://github.com/JmPotato) + + TiDB v8.0.0 enhances log redaction and supports marking user data in TiDB logs with `‹ ›`. Based on the marked logs, you can decide whether to redact the marked information when displaying the logs, thus increasing the flexibility of log redaction. In v8.2.0, TiFlash implements a similar log redaction enhancement. + + In v8.3.0, PD implements a similar log redaction enhancement. To use this feature, you can set the value of the PD configuration item `security.redact-info-log` to `"marker"`. + + For more information, see [documentation](/log-redaction.md#log-redaction-in-pd-side). + +* Enhance TiKV log redaction [#17206](https://github.com/tikv/tikv/issues/17206) @[lucasliang](https://github.com/LykxSassinator) + + TiDB v8.0.0 enhances log redaction and supports marking user data in TiDB logs with `‹ ›`. Based on the marked logs, you can decide whether to redact the marked information when displaying the logs, thus increasing the flexibility of log redaction. In v8.2.0, TiFlash implements a similar log redaction enhancement. + + In v8.3.0, TiKV implements a similar log redaction enhancement. To use this feature, you can set the value of the TiKV configuration item `security.redact-info-log` to `"marker"`. + + For more information, see [documentation](/log-redaction.md#log-redaction-in-tikv-side). + +### Data migration + +* TiCDC supports replicating DDL statements in bi-directional replication (BDR) mode (GA) [#10301](https://github.com/pingcap/tiflow/issues/10301) [#48519](https://github.com/pingcap/tidb/issues/48519) @[okJiang](https://github.com/okJiang) @[asddongmen](https://github.com/asddongmen) + + TiCDC v7.6.0 introduced the replication of DDL statements with bi-directional replication configured. Previously, bi-directional replication of DDL statements was not supported by TiCDC, so users of TiCDC's bi-directional replication had to execute DDL statements on both TiDB clusters separately. With this feature, after assigning a `PRIMARY` BDR role to a cluster, TiCDC can replicate the DDL statements from that cluster to the `SECONDARY` cluster. + + In v8.3.0, this feature becomes generally available (GA). + + For more information, see [documentation](/ticdc/ticdc-bidirectional-replication.md). + +## Compatibility changes + +> **Note:** +> +> This section provides compatibility changes you need to know when you upgrade from v8.2.0 to the current version (v8.3.0). If you are upgrading from v8.1.0 or earlier versions to the current version, you might also need to check the compatibility changes introduced in intermediate versions. + +### Behavior changes + +* To avoid incorrect use of commands, `pd-ctl` cancels the prefix matching mechanism. For example, `store remove-tombstone` cannot be called via `store remove` [#8413](https://github.com/tikv/pd/issues/8413) @[lhy1024](https://github.com/lhy1024) + +### System variables + +| Variable name | Change type | Description | +|--------|------------------------------|------| +| [`tidb_ddl_reorg_batch_size`](/system-variables.md#tidb_ddl_reorg_batch_size) | Modified | Adds the SESSION scope. | +| [`tidb_ddl_reorg_worker_cnt`](/system-variables.md#tidb_ddl_reorg_worker_cnt) | Modified | Adds the SESSION scope. 
| +| [`tidb_gc_concurrency`](/system-variables.md#tidb_gc_concurrency-new-in-v50) | Modified | Starting from v8.3.0, this variable controls the number of concurrent threads during the [Resolve Locks](/garbage-collection-overview.md#resolve-locks) and [Delete Range](/garbage-collection-overview.md#delete-ranges) steps of the [Garbage Collection (GC)](/garbage-collection-overview.md) process. Before v8.3.0, this variable only controls the number of threads during the [Resolve Locks](/garbage-collection-overview.md#resolve-locks) step. | +| [`tidb_low_resolution_tso`](/system-variables.md#tidb_low_resolution_tso) | Modified | Adds the GLOBAL scope. | +| [`tidb_opt_projection_push_down`](/system-variables.md#tidb_opt_projection_push_down-new-in-v610) | Modified | Adds the GLOBAL scope and persists the variable value to the cluster. Changes the default value from `OFF` to `ON` after further tests, which means that the optimizer is allowed to push `Projection` down to the TiKV coprocessor. | +| [`tidb_analyze_column_options`](/system-variables.md#tidb_analyze_column_options-new-in-v830) | Newly added | Controls the behavior of the `ANALYZE TABLE` statement. Setting it to the default value `PREDICATE` means only collecting statistics for [predicate columns](/statistics.md#collect-statistics-on-some-columns); setting it to `ALL` means collecting statistics for all columns. | +| [`tidb_enable_lazy_cursor_fetch`](/system-variables.md#tidb_enable_lazy_cursor_fetch-new-in-v830) | Newly added | Controls the behavior of the [Cursor Fetch](/develop/dev-guide-connection-parameters.md#use-streamingresult-to-get-the-execution-result) feature. | +| [`tidb_enable_shared_lock_promotion`](/system-variables.md#tidb_enable_shared_lock_promotion-new-in-v830) | Newly added | Controls whether to enable the feature of upgrading shared locks to exclusive locks. The default value of this variable is `OFF`, which means that the function of upgrading shared locks to exclusive locks is disabled. | +| [`tiflash_hashagg_preaggregation_mode`](/system-variables.md#tiflash_hashagg_preaggregation_mode-new-in-v830) | Newly added | Controls the pre-aggregation strategy used during the first stage of two-stage or three-stage HashAgg operations pushed down to TiFlash. | + +### Configuration file parameters + +| Configuration file | Configuration parameter | Change type | Description | +| -------- | -------- | -------- | -------- | +| TiDB | [`tikv-client.batch-policy`](/tidb-configuration-file.md#batch-policy-new-in-v830) | Newly added | Controls the batching strategy for requests from TiDB to TiKV. | +| PD | [`security.redact-info-log`](/pd-configuration-file.md#redact-info-log-new-in-v50) | Modified | Support setting the value of the PD configuration item `security.redact-info-log` to `"marker"` to mark sensitive information in the log with `‹ ›` instead of shielding it directly. With the `"marker"` option, you can customize the redaction rules. | +| TiKV | [`security.redact-info-log`](/tikv-configuration-file.md#redact-info-log-new-in-v408) | Modified | Support setting the value of the TiKV configuration item `security.redact-info-log` to `"marker"` to mark sensitive information in the log with `‹ ›` instead of shielding it directly. With the `"marker"` option, you can customize the redaction rules. 
| +| TiFlash | [`security.redact-info-log`](/tiflash/tiflash-configuration.md#configure-the-tiflash-learnertoml-file) | Modified | Support setting the value of the TiFlash Learner configuration item `security.redact-info-log` to `"marker"` to mark sensitive information in the log with `‹ ›` instead of shielding it directly. With the `"marker"` option, you can customize the redaction rules. | +| BR | [`--allow-pitr-from-incremental`](/br/br-incremental-guide.md#limitations) | Newly added | Controls whether incremental backups are compatible with subsequent log backups. The default value is `true`, which means that incremental backups are compatible with subsequent log backups. When you keep the default value `true`, the DDLs that need to be replayed are strictly checked before the incremental restore begins. | + +### System tables + +* The [`INFORMATION_SCHEMA.PROCESSLIST`](/information-schema/information-schema-processlist.md) and [`INFORMATION_SCHEMA.CLUSTER_PROCESSLIST`](/information-schema/information-schema-processlist.md#cluster_processlist) system tables add the `SESSION_ALIAS` field to show the number of rows currently affected by the DML statement [#46889](https://github.com/pingcap/tidb/issues/46889) @[lcwangchao](https://github.com/lcwangchao) + +## Deprecated features + +* The following features are deprecated starting from v8.3.0: + + * Starting from v7.5.0, [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) replication is deprecated. Starting from v8.3.0, TiDB Binlog is fully deprecated, with removal planned for a future release. For incremental data replication, use [TiCDC](/ticdc/ticdc-overview.md) instead. For point-in-time recovery (PITR), use [PITR](/br/br-pitr-guide.md). + * Starting from v8.3.0, the [`tidb_enable_column_tracking`](/system-variables.md#tidb_enable_column_tracking-new-in-v540) system variable is deprecated. TiDB tracks predicate columns by default. For more information, see [`tidb_analyze_column_options`](/system-variables.md#tidb_analyze_column_options-new-in-v830). + +* The following features are planned for deprecation in future versions: + + * TiDB introduces the system variable [`tidb_enable_auto_analyze_priority_queue`](/system-variables.md#tidb_enable_auto_analyze_priority_queue-new-in-v800), which controls whether priority queues are enabled to optimize the ordering of tasks that automatically collect statistics. In future releases, the priority queue will be the only way to order tasks for automatically collecting statistics, so this system variable will be deprecated. + * TiDB introduces the system variable [`tidb_enable_async_merge_global_stats`](/system-variables.md#tidb_enable_async_merge_global_stats-new-in-v750) in v7.5.0. You can use it to set TiDB to use asynchronous merging of partition statistics to avoid OOM issues. In future releases, partition statistics will be merged asynchronously, so this system variable will be deprecated. + * It is planned to redesign [the automatic evolution of execution plan bindings](/sql-plan-management.md#baseline-evolution) in subsequent releases, and the related variables and behavior will change. + * In v8.0.0, TiDB introduces the [`tidb_enable_parallel_hashagg_spill`](/system-variables.md#tidb_enable_parallel_hashagg_spill-new-in-v800) system variable to control whether TiDB supports disk spill for the concurrent HashAgg algorithm. In future versions, the [`tidb_enable_parallel_hashagg_spill`](/system-variables.md#tidb_enable_parallel_hashagg_spill-new-in-v800) system variable will be deprecated. 
+ * The TiDB Lightning parameter [`conflict.max-record-rows`](/tidb-lightning/tidb-lightning-configuration.md#tidb-lightning-task) is planned for deprecation in a future release and will be subsequently removed. This parameter will be replaced by [`conflict.threshold`](/tidb-lightning/tidb-lightning-configuration.md#tidb-lightning-task), which means that the maximum number of conflicting records is consistent with the maximum number of conflicting records that can be tolerated in a single import task. + +* The following features are planned for removal in future versions: + + * Starting from v8.0.0, TiDB Lightning deprecates the [old version of conflict detection](/tidb-lightning/tidb-lightning-physical-import-mode-usage.md#the-old-version-of-conflict-detection-deprecated-in-v800) strategy for the physical import mode, and enables you to control the conflict detection strategy for both logical and physical import modes via the [`conflict.strategy`](/tidb-lightning/tidb-lightning-configuration.md#tidb-lightning-task) parameter. The [`duplicate-resolution`](/tidb-lightning/tidb-lightning-configuration.md) parameter for the old version of conflict detection will be removed in a future release. + +## Improvements + ++ TiDB + + - Support the `SELECT ... STRAIGHT_JOIN ... USING ( ... )` statement [#54162](https://github.com/pingcap/tidb/issues/54162) @[dveeden](https://github.com/dveeden) + - Construct more precise index access ranges for filter conditions like `((idx_col_1 > 1) or (idx_col_1 = 1 and idx_col_2 > 10)) and ((idx_col_1 < 10) or (idx_col_1 = 10 and idx_col_2 < 20))` [#54337](https://github.com/pingcap/tidb/issues/54337) @[ghazalfamilyusa](https://github.com/ghazalfamilyusa) + - Use index order to avoid extra sorting operations for SQL queries like `WHERE idx_col_1 IS NULL ORDER BY idx_col_2` [#54188](https://github.com/pingcap/tidb/issues/54188) @[ari-e](https://github.com/ari-e) + - Display analyzed indexes in the `mysql.analyze_jobs` system table [#53567](https://github.com/pingcap/tidb/issues/53567) @[hi-rustin](https://github.com/hi-rustin) + - Support applying the `tidb_redact_log` setting to the output of `EXPLAIN` statements [#54565](https://github.com/pingcap/tidb/issues/54565) @[hawkingrei](https://github.com/hawkingrei) + - Support generating the `Selection` operator on `IndexRangeScan` for multi-valued indexes to improve query efficiency [#54876](https://github.com/pingcap/tidb/issues/54876) @[time-and-fate](https://github.com/time-and-fate) + - Support killing automatic `ANALYZE` tasks that are running outside the set time window [#55283](https://github.com/pingcap/tidb/issues/55283) @[hawkingrei](https://github.com/hawkingrei) + - Adjust estimation results from 0 to 1 for equality conditions that do not hit TopN when statistics are entirely composed of TopN and the modified row count in the corresponding table statistics is non-zero [#47400](https://github.com/pingcap/tidb/issues/47400) @[terry1purcell](https://github.com/terry1purcell) + - The TopN operator supports disk spill [#47733](https://github.com/pingcap/tidb/issues/47733) @[xzhangxian1008](https://github.com/xzhangxian1008) + - TiDB node supports executing queries with the `WITH ROLLUP` modifier and the `GROUPING` function [#42631](https://github.com/pingcap/tidb/issues/42631) @[Arenatlx](https://github.com/Arenatlx) + - The system variable [`tidb_low_resolution_tso`](/system-variables.md#tidb_low_resolution_tso) supports the `GLOBAL` scope 
[#55022](https://github.com/pingcap/tidb/issues/55022) @[cfzjywxk](https://github.com/cfzjywxk) + - Improve GC (Garbage Collection) efficiency by supporting concurrent range deletion. You can control the number of concurrent threads using [`tidb_gc_concurrency`](/system-variables.md#tidb_gc_concurrency-new-in-v50) [#54570](https://github.com/pingcap/tidb/issues/54570) @[ekexium](https://github.com/ekexium) + - Improve the performance of bulk DML execution mode (`tidb_dml_type = "bulk"`) [#50215](https://github.com/pingcap/tidb/issues/50215) @[ekexium](https://github.com/ekexium) + - Improve the performance of schema information cache-related interface `SchemaByID` [#54074](https://github.com/pingcap/tidb/issues/54074) @[ywqzzy](https://github.com/ywqzzy) + - Improve the query performance for certain system tables when schema information caching is enabled [#50305](https://github.com/pingcap/tidb/issues/50305) @[tangenta](https://github.com/tangenta) + - Optimize error messages for conflicting keys when adding unique indexes [#53004](https://github.com/pingcap/tidb/issues/53004) @[lance6716](https://github.com/lance6716) + ++ PD + + - Support modifying the `batch` configuration of the `evict-leader-scheduler` via `pd-ctl` to accelerate the leader eviction process [#8265](https://github.com/tikv/pd/issues/8265) @[rleungx](https://github.com/rleungx) + - Add the `store_id` monitoring metric to the **Cluster > Label distribution** panel in Grafana to display store IDs corresponding to different labels [#8337](https://github.com/tikv/pd/issues/8337) @[HuSharp](https://github.com/HuSharp) + - Support fallback to the default resource group when the specified resource group does not exist [#8388](https://github.com/tikv/pd/issues/8388) @[JmPotato](https://github.com/JmPotato) + - Add the `approximate_kv_size` field to the Region information output by the `region` command in `pd-ctl` [#8412](https://github.com/tikv/pd/issues/8412) @[zeminzhou](https://github.com/zeminzhou) + - Optimize the message that returns when you call the PD API to delete the TTL configuration [#8450](https://github.com/tikv/pd/issues/8450) @[lhy1024](https://github.com/lhy1024) + - Optimize the RU consumption behavior of large query read requests to reduce the impact on other requests [#8457](https://github.com/tikv/pd/issues/8457) @[nolouch](https://github.com/nolouch) + - Optimize the error message that returns when you misconfigure PD microservices [#52912](https://github.com/pingcap/tidb/issues/52912) @[rleungx](https://github.com/rleungx) + - Add the `--name` startup parameter to PD microservices to more accurately display the service name during deployment [#7995](https://github.com/tikv/pd/issues/7995) @[HuSharp](https://github.com/HuSharp) + - Support dynamically adjusting `PatrolRegionScanLimit` based on the number of Regions to reduce Region scan time [#7963](https://github.com/tikv/pd/issues/7963) @[lhy1024](https://github.com/lhy1024) + ++ TiKV + + - Optimize the batching policy for writing Raft logs when `async-io` is enabled to reduce the consumption of disk I/O bandwidth resources [#16907](https://github.com/tikv/tikv/issues/16907) @[LykxSassinator](https://github.com/LykxSassinator) + - Redesign the TiCDC delegate and downstream modules to better support Region partial subscription [#16362](https://github.com/tikv/tikv/issues/16362) @[hicqu](https://github.com/hicqu) + - Reduce the size of a single slow query log 
[#17294](https://github.com/tikv/tikv/issues/17294) @[Connor1996](https://github.com/Connor1996) + - Add a new monitoring metric `min safe ts` [#17307](https://github.com/tikv/tikv/issues/17307) @[mittalrishabh](https://github.com/mittalrishabh) + - Reduce the memory usage of the peer message channel [#16229](https://github.com/tikv/tikv/issues/16229) @[Connor1996](https://github.com/Connor1996) + ++ TiFlash + + - Support generating ad hoc heap profiling in SVG format [#9320](https://github.com/pingcap/tiflash/issues/9320) @[CalvinNeo](https://github.com/CalvinNeo) + ++ Tools + + + Backup & Restore (BR) + + - Support checking whether a full backup exists before starting point-in-time recovery (PITR) for the first time. If the full backup is not found, BR terminates the restore and returns an error [#54418](https://github.com/pingcap/tidb/issues/54418) @[Leavrth](https://github.com/Leavrth) + - Support checking whether the disk space in TiKV and TiFlash is sufficient before restoring snapshot backups. If the space is insufficient, BR terminates the restore and returns an error [#54316](https://github.com/pingcap/tidb/issues/54316) @[RidRisR](https://github.com/RidRisR) + - Support checking whether the disk space in TiKV is sufficient before TiKV downloads each SST file. If the space is insufficient, BR terminates the restore and returns an error [#17224](https://github.com/tikv/tikv/issues/17224) @[RidRisR](https://github.com/RidRisR) + - Support setting Alibaba Cloud access credentials through environment variables [#45551](https://github.com/pingcap/tidb/issues/45551) @[RidRisR](https://github.com/RidRisR) + - Optimize the backup feature, improving backup performance and stability during node restarts, cluster scaling-out, and network jitter when backing up large numbers of tables [#52534](https://github.com/pingcap/tidb/issues/52534) @[3pointer](https://github.com/3pointer) + - Automatically set the environment variable `GOMEMLIMIT` based on the available memory of the BR process to avoid OOM when using BR for backup and restore [#53777](https://github.com/pingcap/tidb/issues/53777) @[Leavrth](https://github.com/Leavrth) + - Make incremental backups compatible with point-in-time recovery (PITR) [#54474](https://github.com/pingcap/tidb/issues/54474) @[3pointer](https://github.com/3pointer) + - Support backing up and restoring the `mysql.column_stats_usage` table [#53567](https://github.com/pingcap/tidb/issues/53567) @[hi-rustin](https://github.com/hi-rustin) + +## Bug fixes + ++ TiDB + + - Reset the parameters in the `Open` method of `PipelinedWindow` to fix the unexpected error that occurs when the `PipelinedWindow` is used as a child node of `Apply` due to the reuse of previous parameter values caused by repeated opening and closing operations [#53600](https://github.com/pingcap/tidb/issues/53600) @[XuHuaiyu](https://github.com/XuHuaiyu) + - Fix the issue that the query might get stuck when terminated because the memory usage exceeds the limit set by `tidb_mem_quota_query` [#55042](https://github.com/pingcap/tidb/issues/55042) @[yibin87](https://github.com/yibin87) + - Fix the issue that the disk spill for the HashAgg operator causes incorrect query results during parallel calculation [#55290](https://github.com/pingcap/tidb/issues/55290) @[xzhangxian1008](https://github.com/xzhangxian1008) + - Fix the issue of wrong `JSON_TYPE` when casting `YEAR` to JSON format [#54494](https://github.com/pingcap/tidb/issues/54494) 
@[YangKeao](https://github.com/YangKeao) + - Fix the issue that the value range of the `tidb_schema_cache_size` system variable is wrong [#54034](https://github.com/pingcap/tidb/issues/54034) @[lilinghai](https://github.com/lilinghai) + - Fix the issue that partition pruning does not work when the partition expression is `EXTRACT(YEAR FROM col)` [#54210](https://github.com/pingcap/tidb/issues/54210) @[mjonss](https://github.com/mjonss) + - Fix the issue that `FLASHBACK DATABASE` fails when many tables exist in the database [#54415](https://github.com/pingcap/tidb/issues/54415) @[lance6716](https://github.com/lance6716) + - Fix the issue that `FLASHBACK DATABASE` enters an infinite loop when handling many databases [#54915](https://github.com/pingcap/tidb/issues/54915) @[lance6716](https://github.com/lance6716) + - Fix the issue that adding an index in index acceleration mode might fail [#54568](https://github.com/pingcap/tidb/issues/54568) @[lance6716](https://github.com/lance6716) + - Fix the issue that `ADMIN CANCEL DDL JOBS` might cause DDL to fail [#54687](https://github.com/pingcap/tidb/issues/54687) @[lance6716](https://github.com/lance6716) + - Fix the issue that table replication fails when the index length of the table replicated from DM exceeds the maximum length specified by `max-index-length` [#55138](https://github.com/pingcap/tidb/issues/55138) @[lance6716](https://github.com/lance6716) + - Fix the issue that the error `runtime error: index out of range` might occur when executing SQL statements with `tidb_enable_inl_join_inner_multi_pattern` enabled [#54535](https://github.com/pingcap/tidb/issues/54535) @[joechenrh](https://github.com/joechenrh) + - Fix the issue that you cannot exit TiDB using Control+C during the process of initializing statistics [#54589](https://github.com/pingcap/tidb/issues/54589) @[tiancaiamao](https://github.com/tiancaiamao) + - Fix the issue that the `INL_MERGE_JOIN` optimizer hint returns incorrect results by deprecating it [#54064](https://github.com/pingcap/tidb/issues/54064) @[AilinKid](https://github.com/AilinKid) + - Fix the issue that a correlated subquery that contains `WITH ROLLUP` might cause TiDB to panic and return the error `runtime error: index out of range` [#54983](https://github.com/pingcap/tidb/issues/54983) @[AilinKid](https://github.com/AilinKid) + - Fix the issue that predicates cannot be pushed down properly when the filter condition of a SQL query contains virtual columns and the execution condition contains `UnionScan` [#54870](https://github.com/pingcap/tidb/issues/54870) @[qw4990](https://github.com/qw4990) + - Fix the issue that the error `runtime error: invalid memory address or nil pointer dereference` might occur when executing SQL statements with `tidb_enable_inl_join_inner_multi_pattern` enabled [#55169](https://github.com/pingcap/tidb/issues/55169) @[hawkingrei](https://github.com/hawkingrei) + - Fix the issue that a query statement that contains `UNION` might return incorrect results [#52985](https://github.com/pingcap/tidb/issues/52985) @[XuHuaiyu](https://github.com/XuHuaiyu) + - Fix the issue that the `tot_col_size` column in the `mysql.stats_histograms` table might be a negative number [#55126](https://github.com/pingcap/tidb/issues/55126) @[qw4990](https://github.com/qw4990) + - Fix the issue that `columnEvaluator` cannot identify the column references in the input chunk, which leads to `runtime error: index out of range` when executing SQL 
statements [#53713](https://github.com/pingcap/tidb/issues/53713) @[AilinKid](https://github.com/AilinKid) + - Fix the issue that `STATS_EXTENDED` becomes a reserved keyword [#39573](https://github.com/pingcap/tidb/issues/39573) @[wddevries](https://github.com/wddevries) + - Fix the issue that when `tidb_low_resolution` is enabled, `select for update` can be executed [#54684](https://github.com/pingcap/tidb/issues/54684) @[cfzjywxk](https://github.com/cfzjywxk) + - Fix the issue that internal SQL queries cannot be displayed in the slow query log when `tidb_redact_log` is enabled [#54190](https://github.com/pingcap/tidb/issues/54190) @[lcwangchao](https://github.com/lcwangchao) + - Fix the issue that the memory used by transactions might be tracked multiple times [#53984](https://github.com/pingcap/tidb/issues/53984) @[ekexium](https://github.com/ekexium) + - Fix the issue that using `SHOW WARNINGS;` to obtain warnings might cause a panic [#48756](https://github.com/pingcap/tidb/issues/48756) @[xhebox](https://github.com/xhebox) + - Fix the issue that loading index statistics might cause memory leaks [#54022](https://github.com/pingcap/tidb/issues/54022) @[hi-rustin](https://github.com/hi-rustin) + - Fix the issue that the `LENGTH()` condition is unexpectedly removed when the collation is `utf8_bin` or `utf8mb4_bin` [#53730](https://github.com/pingcap/tidb/issues/53730) @[elsa0520](https://github.com/elsa0520) + - Fix the issue that statistics collection does not update the `stats_history` table when encountering duplicate primary keys [#47539](https://github.com/pingcap/tidb/issues/47539) @[Defined2014](https://github.com/Defined2014) + - Fix the issue that recursive CTE queries might result in invalid pointers [#54449](https://github.com/pingcap/tidb/issues/54449) @[hawkingrei](https://github.com/hawkingrei) + - Fix the issue that the Connection Count monitoring metric in Grafana is incorrect when some connections exit before the handshake is complete [#54428](https://github.com/pingcap/tidb/issues/54428) @[YangKeao](https://github.com/YangKeao) + - Fix the issue that the Connection Count of each resource group is incorrect when using TiProxy and resource groups [#54545](https://github.com/pingcap/tidb/issues/54545) @[YangKeao](https://github.com/YangKeao) + - Fix the issue that when queries contain non-correlated subqueries and `LIMIT` clauses, column pruning might be incomplete, resulting in a less optimal plan [#54213](https://github.com/pingcap/tidb/issues/54213) @[qw4990](https://github.com/qw4990) + - Fix the issue of reusing wrong point get plans for `SELECT ... 
FOR UPDATE` [#54652](https://github.com/pingcap/tidb/issues/54652) @[qw4990](https://github.com/qw4990) + - Fix the issue that the `TIMESTAMPADD()` function goes into an infinite loop when the first argument is `month` and the second argument is negative [#54908](https://github.com/pingcap/tidb/issues/54908) @[xzhangxian1008](https://github.com/xzhangxian1008) + - Fix the issue that internal SQL statements in the slow log are redacted to null by default [#54190](https://github.com/pingcap/tidb/issues/54190) [#52743](https://github.com/pingcap/tidb/issues/52743) [#53264](https://github.com/pingcap/tidb/issues/53264) @[lcwangchao](https://github.com/lcwangchao) + - Fix the issue that `PointGet` execution plans for `_tidb_rowid` can be generated [#54583](https://github.com/pingcap/tidb/issues/54583) @[Defined2014](https://github.com/Defined2014) + - Fix the issue that `SHOW IMPORT JOBS` reports an error `Unknown column 'summary'` after upgrading from v7.1 [#54241](https://github.com/pingcap/tidb/issues/54241) @[tangenta](https://github.com/tangenta) + - Fix the issue that obtaining the column information using `information_schema.columns` returns warning 1356 when a subquery is used as a column definition in a view definition [#54343](https://github.com/pingcap/tidb/issues/54343) @[lance6716](https://github.com/lance6716) + - Fix the issue that RANGE partitioned tables that are not strictly self-incrementing can be created [#54829](https://github.com/pingcap/tidb/issues/54829) @[Defined2014](https://github.com/Defined2014) + - Fix the issue that `INDEX_HASH_JOIN` cannot exit properly when SQL is abnormally interrupted [#54688](https://github.com/pingcap/tidb/issues/54688) @[wshwsh12](https://github.com/wshwsh12) + - Fix the issue that the network partition during adding indexes using the Distributed eXecution Framework (DXF) might cause inconsistent data indexes [#54897](https://github.com/pingcap/tidb/issues/54897) @[tangenta](https://github.com/tangenta) + ++ PD + + - Fix the issue that no error is reported when binding a role to a resource group [#54417](https://github.com/pingcap/tidb/issues/54417) @[JmPotato](https://github.com/JmPotato) + - Fix the issue that a resource group encounters quota limits when requesting tokens for more than 500 ms [#8349](https://github.com/tikv/pd/issues/8349) @[nolouch](https://github.com/nolouch) + - Fix the issue that the time data type in the `INFORMATION_SCHEMA.RUNAWAY_WATCHES` table is incorrect [#54770](https://github.com/pingcap/tidb/issues/54770) @[HuSharp](https://github.com/HuSharp) + - Fix the issue that resource groups could not effectively limit resource usage under high concurrency [#8435](https://github.com/tikv/pd/issues/8435) @[nolouch](https://github.com/nolouch) + - Fix the issue that an incorrect PD API is called when you retrieve table attributes [#55188](https://github.com/pingcap/tidb/issues/55188) @[JmPotato](https://github.com/JmPotato) + - Fix the issue that the scaling progress is displayed incorrectly after the `scheduling` microservice is enabled [#8331](https://github.com/tikv/pd/issues/8331) @[rleungx](https://github.com/rleungx) + - Fix the issue that the encryption manager is not initialized before use [#8384](https://github.com/tikv/pd/issues/8384) @[rleungx](https://github.com/rleungx) + - Fix the issue that some logs are not redacted [#8419](https://github.com/tikv/pd/issues/8419) @[rleungx](https://github.com/rleungx) + - Fix the issue that 
redirection might panic during the startup of PD microservices [#8406](https://github.com/tikv/pd/issues/8406) @[HuSharp](https://github.com/HuSharp) + - Fix the issue that the `split-merge-interval` configuration item might not take effect when you modify its value repeatedly (such as changing it from `1s` to `1h` and back to `1s`) [#8404](https://github.com/tikv/pd/issues/8404) @[lhy1024](https://github.com/lhy1024) + - Fix the issue that setting `replication.strictly-match-label` to `true` causes TiFlash to fail to start [#8480](https://github.com/tikv/pd/issues/8480) @[rleungx](https://github.com/rleungx) + - Fix the issue that fetching TSO is slow when analyzing large partitioned tables, causing `ANALYZE` performance degradation [#8500](https://github.com/tikv/pd/issues/8500) @[rleungx](https://github.com/rleungx) + - Fix the potential data races in large clusters [#8386](https://github.com/tikv/pd/issues/8386) @[rleungx](https://github.com/rleungx) + - Fix the issue that when determining whether queries are Runaway Queries, TiDB only counts time consumption spent on the Coprocessor side while missing time consumption spent on the TiDB side, resulting in some queries not being identified as Runaway Queries [#51325](https://github.com/pingcap/tidb/issues/51325) @[HuSharp](https://github.com/HuSharp) + ++ TiFlash + + - Fix the issue that when using the `CAST()` function to convert a string to a datetime with a time zone or invalid characters, the result is incorrect [#8754](https://github.com/pingcap/tiflash/issues/8754) @[solotzg](https://github.com/solotzg) + - Fix the issue that TiFlash might panic after executing `RENAME TABLE ... TO ...` on a partitioned table with empty partitions across databases [#9132](https://github.com/pingcap/tiflash/issues/9132) @[JaySon-Huang](https://github.com/JaySon-Huang) + - Fix the issue that some queries might report a column type mismatch error after late materialization is enabled [#9175](https://github.com/pingcap/tiflash/issues/9175) @[JinheLin](https://github.com/JinheLin) + - Fix the issue that queries with virtual generated columns might return incorrect results after late materialization is enabled [#9188](https://github.com/pingcap/tiflash/issues/9188) @[JinheLin](https://github.com/JinheLin) + - Fix the issue that setting the SSL certificate configuration to an empty string in TiFlash incorrectly enables TLS and causes TiFlash to fail to start [#9235](https://github.com/pingcap/tiflash/issues/9235) @[JaySon-Huang](https://github.com/JaySon-Huang) + - Fix the issue that TiFlash might panic when a database is deleted shortly after creation [#9266](https://github.com/pingcap/tiflash/issues/9266) @[JaySon-Huang](https://github.com/JaySon-Huang) + - Fix the issue that a network partition (network disconnection) between TiFlash and any PD might cause read request timeout errors [#9243](https://github.com/pingcap/tiflash/issues/9243) @[Lloyd-Pottiger](https://github.com/Lloyd-Pottiger) + - Fix the issue that TiFlash write nodes might fail to restart in the disaggregated storage and compute architecture [#9282](https://github.com/pingcap/tiflash/issues/9282) @[JaySon-Huang](https://github.com/JaySon-Huang) + - Fix the issue that read snapshots of TiFlash write nodes are not released in a timely manner in the disaggregated storage and compute architecture [#9298](https://github.com/pingcap/tiflash/issues/9298) @[JinheLin](https://github.com/JinheLin) + ++ TiKV + + - Fix the issue that 
cleaning up stale regions might accidentally delete valid data [#17258](https://github.com/tikv/tikv/issues/17258) @[hbisheng](https://github.com/hbisheng) + - Fix the issue that `Ingestion picked level` and `Compaction Job Size(files)` are displayed incorrectly in the TiKV dashboard in Grafana [#15990](https://github.com/tikv/tikv/issues/15990) @[Connor1996](https://github.com/Connor1996) + - Fix the issue that `cancel_generating_snap` incorrectly updating `snap_tried_cnt` causes TiKV to panic [#17226](https://github.com/tikv/tikv/issues/17226) @[hbisheng](https://github.com/hbisheng) + - Fix the issue that the information of `Ingest SST duration seconds` is incorrect [#17239](https://github.com/tikv/tikv/issues/17239) @[LykxSassinator](https://github.com/LykxSassinator) + - Fix the issue that CPU profiling flag is not reset correctly when an error occurs [#17234](https://github.com/tikv/tikv/issues/17234) @[Connor1996](https://github.com/Connor1996) + - Fix the issue that bloom filters are incompatible between earlier versions (earlier than v7.1) and later versions [#17272](https://github.com/tikv/tikv/issues/17272) @[v01dstar](https://github.com/v01dstar) + ++ Tools + + + Backup & Restore (BR) + + - Fix the issue that DDLs requiring backfilling, such as `ADD INDEX` and `MODIFY COLUMN`, might not be correctly recovered during incremental restore [#54426](https://github.com/pingcap/tidb/issues/54426) @[3pointer](https://github.com/3pointer) + - Fix the issue that the progress is stuck during backup and restore [#54140](https://github.com/pingcap/tidb/issues/54140) @[Leavrth](https://github.com/Leavrth) + - Fix the issue that the checkpoint path of backup and restore is incompatible with some external storage [#55265](https://github.com/pingcap/tidb/issues/55265) @[Leavrth](https://github.com/Leavrth) + + + TiCDC + + - Fix the issue that the processor might get stuck when the downstream Kafka is inaccessible [#11340](https://github.com/pingcap/tiflow/issues/11340) @[asddongmen](https://github.com/asddongmen) + + + TiDB Data Migration (DM) + + - Fix the issue that schema tracker incorrectly handles LIST partition tables, causing DM errors [#11408](https://github.com/pingcap/tiflow/issues/11408) @[lance6716](https://github.com/lance6716) + - Fix the issue that data replication is interrupted when the index length exceeds the default value of `max-index-length` [#11459](https://github.com/pingcap/tiflow/issues/11459) @[michaelmdeng](https://github.com/michaelmdeng) + - Fix the issue that DM cannot handle `FAKE_ROTATE_EVENT` correctly [#11381](https://github.com/pingcap/tiflow/issues/11381) @[lance6716](https://github.com/lance6716) + + + TiDB Lightning + + - Fix the issue that TiDB Lightning outputs a confusing `WARN` log when it fails to obtain the keyspace name [#54232](https://github.com/pingcap/tidb/issues/54232) @[kennytm](https://github.com/kennytm) + - Fix the issue that the TLS configuration of TiDB Lightning affects cluster certificates [#54172](https://github.com/pingcap/tidb/issues/54172) @[ei-sugimoto](https://github.com/ei-sugimoto) + - Fix the issue that transaction conflicts occur during data import using TiDB Lightning [#49826](https://github.com/pingcap/tidb/issues/49826) @[lance6716](https://github.com/lance6716) + - Fix the issue that large checkpoint files cause performance degradation during the import of numerous databases and tables [#55054](https://github.com/pingcap/tidb/issues/55054) 
@[D3Hunter](https://github.com/D3Hunter) + +## Contributors + +We would like to thank the following contributors from the TiDB community: + +- [ari-e](https://github.com/ari-e) +- [ei-sugimoto](https://github.com/ei-sugimoto) +- [HaoW30](https://github.com/HaoW30) +- [JackL9u](https://github.com/JackL9u) +- [michaelmdeng](https://github.com/michaelmdeng) +- [mittalrishabh](https://github.com/mittalrishabh) +- [qingfeng777](https://github.com/qingfeng777) +- [renovate](https://github.com/apps/renovate) +- [SandeepPadhi](https://github.com/SandeepPadhi) +- [yzhan1](https://github.com/yzhan1) diff --git a/releases/release-notes.md b/releases/release-notes.md index 9afeebeb4802d..cd6e09ee82b5e 100644 --- a/releases/release-notes.md +++ b/releases/release-notes.md @@ -1,13 +1,17 @@ --- title: Release Notes aliases: ['/docs/dev/releases/release-notes/','/docs/dev/releases/rn/'] -summary: TiDB has released multiple versions, including 8.2.0, 8.1.0, 8.0.0-DMR, 7.6.0-DMR, 7.5.1, 7.5.0, 7.4.0-DMR, 7.3.0-DMR, 7.2.0-DMR, 7.1.4, 7.1.3, 7.1.2, 7.1.1, 7.1.0, 7.0.0-DMR, 6.6.0-DMR, 6.5.9, 6.5.8, 6.5.7, 6.5.6, 6.5.5, 6.5.4, 6.5.3, 6.5.2, 6.5.1, 6.5.0, 6.4.0-DMR, 6.3.0-DMR, 6.2.0-DMR, 6.1.7, 6.1.6, 6.1.5, 6.1.4, 6.1.3, 6.1.2, 6.1.1, 6.1.0, 6.0.0-DMR, 5.4.3, 5.4.2, 5.4.1, 5.4.0, 5.3.4, 5.3.3, 5.3.2, 5.3.1, 5.3.0, 5.2.4, 5.2.3, 5.2.2, 5.2.1, 5.2.0, 5.1.5, 5.1.4, 5.1.3, 5.1.2, 5.1.1, 5.1.0, 5.0.6, 5.0.5, 5.0.4, 5.0.3, 5.0.2, 5.0.1, 5.0.0, 5.0.0-rc, 4.0.16, 4.0.15, 4.0.14, 4.0.13, 4.0.12, 4.0.11, 4.0.10, 4.0.9, 4.0.8, 4.0.7, 4.0.6, 4.0.5, 4.0.4, 4.0.3, 4.0.2, 4.0.1, 4.0.0, 4.0.0-rc.2, 4.0.0-rc.1, 4.0.0-rc, 4.0.0-beta.2, 4.0.0-beta.1, 4.0.0-beta, 3.1.2, 3.1.1, 3.1.0, 3.1.0-rc, 3.1.0-beta.2, 3.1.0-beta.1, 3.1.0-beta, 3.0.20, 3.0.19, 3.0.18, 3.0.17, 3.0.16, 3.0.15, 3.0.14, 3.0.13, 3.0.12, 3.0.11, 3.0.10, 3.0.9, 3.0.8, 3.0.7, 3.0.6, 3.0.5, 3.0.4, 3.0.3, 3.0.2, 3.0.1, 3.0.0, 3.0.0-rc.3, 3.0.0-rc.2, 3.0.0-rc.1, 3.0.0-beta.1, 3.0.0-beta, 2.1.19, 2.1.18, 2.1.17, 2.1.16, 2.1.15, 2.1.14, 2.1.13, 2.1.12, 2.1.11, 2.1.10, 2.1.9, 2.1.8, 2.1.7, 2.1.6, 2.1.5, 2.1.4, 2.1.3, 2.1.2, 2.1.1, 2.1.0, 2.1.0-rc.5, 2.1.0-rc.4, 2.1.0-rc.3, 2.1.0-rc.2, 2.1.0-rc.1, 2.1.0-beta, 2.0.11, 2.0.10, 2.0.9, 2.0.8, 2.0.7, 2.0.6, 2.0.5, 2.0.4, 2.0.3, 2.0.2, 2.0.1, 2.0.0, 2.0.0-rc.5, 2.0.0-rc.4, 2.0.0-rc.3, 2.0.0-rc.1, 1.1.0-beta, 1.1.0-alpha, 1.0.8, 1.0.7, 1.0.6, 1.0.5, 1.0.4, 1.0.3, 1.0.2, 1.0.1, 1.0.0, Pre-GA, rc4, rc3, rc2, rc1. 
+summary: TiDB has released multiple versions, including 8.3.0-DMR, 8.2.0-DMR, 8.1.0, 8.0.0-DMR, 7.6.0-DMR, 7.5.1, 7.5.0, 7.4.0-DMR, 7.3.0-DMR, 7.2.0-DMR, 7.1.4, 7.1.3, 7.1.2, 7.1.1, 7.1.0, 7.0.0-DMR, 6.6.0-DMR, 6.5.9, 6.5.8, 6.5.7, 6.5.6, 6.5.5, 6.5.4, 6.5.3, 6.5.2, 6.5.1, 6.5.0, 6.4.0-DMR, 6.3.0-DMR, 6.2.0-DMR, 6.1.7, 6.1.6, 6.1.5, 6.1.4, 6.1.3, 6.1.2, 6.1.1, 6.1.0, 6.0.0-DMR, 5.4.3, 5.4.2, 5.4.1, 5.4.0, 5.3.4, 5.3.3, 5.3.2, 5.3.1, 5.3.0, 5.2.4, 5.2.3, 5.2.2, 5.2.1, 5.2.0, 5.1.5, 5.1.4, 5.1.3, 5.1.2, 5.1.1, 5.1.0, 5.0.6, 5.0.5, 5.0.4, 5.0.3, 5.0.2, 5.0.1, 5.0.0, 5.0.0-rc, 4.0.16, 4.0.15, 4.0.14, 4.0.13, 4.0.12, 4.0.11, 4.0.10, 4.0.9, 4.0.8, 4.0.7, 4.0.6, 4.0.5, 4.0.4, 4.0.3, 4.0.2, 4.0.1, 4.0.0, 4.0.0-rc.2, 4.0.0-rc.1, 4.0.0-rc, 4.0.0-beta.2, 4.0.0-beta.1, 4.0.0-beta, 3.1.2, 3.1.1, 3.1.0, 3.1.0-rc, 3.1.0-beta.2, 3.1.0-beta.1, 3.1.0-beta, 3.0.20, 3.0.19, 3.0.18, 3.0.17, 3.0.16, 3.0.15, 3.0.14, 3.0.13, 3.0.12, 3.0.11, 3.0.10, 3.0.9, 3.0.8, 3.0.7, 3.0.6, 3.0.5, 3.0.4, 3.0.3, 3.0.2, 3.0.1, 3.0.0, 3.0.0-rc.3, 3.0.0-rc.2, 3.0.0-rc.1, 3.0.0-beta.1, 3.0.0-beta, 2.1.19, 2.1.18, 2.1.17, 2.1.16, 2.1.15, 2.1.14, 2.1.13, 2.1.12, 2.1.11, 2.1.10, 2.1.9, 2.1.8, 2.1.7, 2.1.6, 2.1.5, 2.1.4, 2.1.3, 2.1.2, 2.1.1, 2.1.0, 2.1.0-rc.5, 2.1.0-rc.4, 2.1.0-rc.3, 2.1.0-rc.2, 2.1.0-rc.1, 2.1.0-beta, 2.0.11, 2.0.10, 2.0.9, 2.0.8, 2.0.7, 2.0.6, 2.0.5, 2.0.4, 2.0.3, 2.0.2, 2.0.1, 2.0.0, 2.0.0-rc.5, 2.0.0-rc.4, 2.0.0-rc.3, 2.0.0-rc.1, 1.1.0-beta, 1.1.0-alpha, 1.0.8, 1.0.7, 1.0.6, 1.0.5, 1.0.4, 1.0.3, 1.0.2, 1.0.1, 1.0.0, Pre-GA, rc4, rc3, rc2, rc1. --- # TiDB Release Notes +## 8.3 + +- [8.3.0-DMR](/releases/release-8.3.0.md): 2024-08-22 + ## 8.2 - [8.2.0-DMR](/releases/release-8.2.0.md): 2024-07-11 diff --git a/releases/release-timeline.md b/releases/release-timeline.md index fe82fa5f0311d..a0dbbe0405ddf 100644 --- a/releases/release-timeline.md +++ b/releases/release-timeline.md @@ -11,6 +11,7 @@ This document shows all the released TiDB versions in reverse chronological orde | Version | Release Date | | :--- | :--- | +| [8.3.0-DMR](/releases/release-8.3.0.md) | 2024-08-22 | | [7.5.3](/releases/release-7.5.3.md) | 2024-08-05 | | [8.2.0-DMR](/releases/release-8.2.0.md) | 2024-07-11 | | [6.5.10](/releases/release-6.5.10.md) | 2024-06-20 | From b0e44b0845ce1b816a6f50f458b20e7b81bc66f9 Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Thu, 22 Aug 2024 11:15:13 +0800 Subject: [PATCH 41/44] basic-features.md: add a column for v8.3.0 (#18604) --- basic-features.md | 418 +++++++++++++++++++++++----------------------- 1 file changed, 210 insertions(+), 208 deletions(-) diff --git a/basic-features.md b/basic-features.md index 8793addb4c925..f64d06fbd0da0 100644 --- a/basic-features.md +++ b/basic-features.md @@ -22,245 +22,247 @@ You can try out TiDB features on [TiDB Playground](https://play.tidbcloud.com/?u ## Data types, functions, and operators -| Data types, functions, and operators | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | -|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| -| [Numeric types](/data-type-numeric.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Date and time types](/data-type-date-and-time.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [String types](/data-type-string.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [JSON type](/data-type-json.md) | Y | Y | Y | Y | Y | E | E | E | E | E | -| [Control flow functions](/functions-and-operators/control-flow-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [String 
functions](/functions-and-operators/string-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Numeric functions and operators](/functions-and-operators/numeric-functions-and-operators.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Date and time functions](/functions-and-operators/date-and-time-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Bit functions and operators](/functions-and-operators/bit-functions-and-operators.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Cast functions and operators](/functions-and-operators/cast-functions-and-operators.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Encryption and compression functions](/functions-and-operators/encryption-and-compression-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Information functions](/functions-and-operators/information-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [JSON functions](/functions-and-operators/json-functions.md) | Y | Y | Y | Y | Y | E | E | E | E | E | -| [Aggregation functions](/functions-and-operators/aggregate-group-by-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Window functions](/functions-and-operators/window-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Miscellaneous functions](/functions-and-operators/miscellaneous-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Operators](/functions-and-operators/operators.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Character sets and collations](/character-set-and-collation.md) [^1] | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [User-level lock](/functions-and-operators/locking-functions.md) | Y | Y | Y | Y | Y | Y | N | N | N | N | +| Data types, functions, and operators | 8.3 | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | +|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| +| [Numeric types](/data-type-numeric.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Date and time types](/data-type-date-and-time.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [String types](/data-type-string.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [JSON type](/data-type-json.md) | Y | Y | Y | Y | Y | Y | E | E | E | E | E | +| [Control flow functions](/functions-and-operators/control-flow-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [String functions](/functions-and-operators/string-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Numeric functions and operators](/functions-and-operators/numeric-functions-and-operators.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Date and time functions](/functions-and-operators/date-and-time-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Bit functions and operators](/functions-and-operators/bit-functions-and-operators.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Cast functions and operators](/functions-and-operators/cast-functions-and-operators.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Encryption and compression functions](/functions-and-operators/encryption-and-compression-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Information functions](/functions-and-operators/information-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [JSON functions](/functions-and-operators/json-functions.md) | Y | Y | Y | Y | Y | Y | E | E | E | E | E | +| [Aggregation functions](/functions-and-operators/aggregate-group-by-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Window 
functions](/functions-and-operators/window-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Miscellaneous functions](/functions-and-operators/miscellaneous-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Operators](/functions-and-operators/operators.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Character sets and collations](/character-set-and-collation.md) [^1] | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [User-level lock](/functions-and-operators/locking-functions.md) | Y | Y | Y | Y | Y | Y | Y | N | N | N | N | ## Indexing and constraints -| Indexing and constraints | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | -|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| -| [Expression indexes](/sql-statements/sql-statement-create-index.md#expression-index) [^2] | Y | Y | Y | Y | Y | E | E | E | E | E | -| [Columnar storage (TiFlash)](/tiflash/tiflash-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Use FastScan to accelerate queries in OLAP scenarios](/tiflash/use-fastscan.md) | Y | Y | Y | Y | E | N | N | N | N | N | -| [RocksDB engine](/storage-engine/rocksdb-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Titan plugin](/storage-engine/titan-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Titan Level Merge](/storage-engine/titan-configuration.md#level-merge-experimental) | E | E | E | E | E | E | E | E | E | E | -| [Use buckets to improve scan concurrency](/tune-region-performance.md#use-bucket-to-increase-concurrency) | E | E | E | E | E | E | N | N | N | N | -| [Invisible indexes](/sql-statements/sql-statement-create-index.md#invisible-index) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Composite `PRIMARY KEY`](/constraints.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [`CHECK` constraints](/constraints.md#check) | Y | Y | Y | N | N | N | N | N | N | N | -| [Unique indexes](/constraints.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Clustered index on integer `PRIMARY KEY`](/clustered-indexes.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Clustered index on composite or non-integer key](/clustered-indexes.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Multi-valued indexes](/sql-statements/sql-statement-create-index.md#multi-valued-indexes) | Y | Y | Y | Y | N | N | N | N | N | N | -| [Foreign key](/constraints.md#foreign-key) | E | E | E | E | N | N | N | N | N | N | -| [TiFlash late materialization](/tiflash/tiflash-late-materialization.md) | Y | Y | Y | Y | N | N | N | N | N | N | +| Indexing and constraints | 8.3 | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | +|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| +| [Expression indexes](/sql-statements/sql-statement-create-index.md#expression-index) [^2] | Y | Y | Y | Y | Y | Y | E | E | E | E | E | +| [Columnar storage (TiFlash)](/tiflash/tiflash-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Use FastScan to accelerate queries in OLAP scenarios](/tiflash/use-fastscan.md) | Y | Y | Y | Y | Y | E | N | N | N | N | N | +| [RocksDB engine](/storage-engine/rocksdb-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Titan plugin](/storage-engine/titan-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Titan Level Merge](/storage-engine/titan-configuration.md#level-merge-experimental) | E | E | E | E | E | E | E | E | E | E | E | +| [Use buckets to improve scan concurrency](/tune-region-performance.md#use-bucket-to-increase-concurrency) | E | E | E | E | 
E | E | E | N | N | N | N | +| [Invisible indexes](/sql-statements/sql-statement-create-index.md#invisible-index) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Composite `PRIMARY KEY`](/constraints.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [`CHECK` constraints](/constraints.md#check) | Y | Y | Y | Y | N | N | N | N | N | N | N | +| [Unique indexes](/constraints.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Clustered index on integer `PRIMARY KEY`](/clustered-indexes.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Clustered index on composite or non-integer key](/clustered-indexes.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Multi-valued indexes](/sql-statements/sql-statement-create-index.md#multi-valued-indexes) | Y | Y | Y | Y | Y | N | N | N | N | N | N | +| [Foreign key](/constraints.md#foreign-key) | E | E | E | E | E | N | N | N | N | N | N | +| [TiFlash late materialization](/tiflash/tiflash-late-materialization.md) | Y | Y | Y | Y | Y | N | N | N | N | N | N | +| [Global index](/partitioned-table.md#global-indexes) | E | N | N | N | N | N | N | N | N | N | N | ## SQL statements -| SQL statements [^3] | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | -|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| -| Basic `SELECT`, `INSERT`, `UPDATE`, `DELETE`, `REPLACE` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| `INSERT ON DUPLICATE KEY UPDATE` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| `LOAD DATA INFILE` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| `SELECT INTO OUTFILE` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| `INNER JOIN`, LEFT\|RIGHT [OUTER] JOIN | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| `UNION`, `UNION ALL` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [`EXCEPT` and `INTERSECT` operators](/functions-and-operators/set-operators.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| `GROUP BY`, `ORDER BY` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Window Functions](/functions-and-operators/window-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Common Table Expressions (CTE)](/sql-statements/sql-statement-with.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| `START TRANSACTION`, `COMMIT`, `ROLLBACK` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [`EXPLAIN`](/sql-statements/sql-statement-explain.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [User-defined variables](/user-defined-variables.md) | E | E | E | E | E | E | E | E | E | E | -| [`BATCH [ON COLUMN] LIMIT INTEGER DELETE`](/sql-statements/sql-statement-batch.md) | Y | Y | Y | Y | Y | Y | N | N | N | N | -| [`BATCH [ON COLUMN] LIMIT INTEGER INSERT/UPDATE/REPLACE`](/sql-statements/sql-statement-batch.md) | Y | Y | Y | Y | Y | N | N | N | N | N | -| [`ALTER TABLE ... 
COMPACT`](/sql-statements/sql-statement-alter-table-compact.md) | Y | Y | Y | Y | Y | E | N | N | N | N | -| [Table Lock](/sql-statements/sql-statement-lock-tables-and-unlock-tables.md) | E | E | E | E | E | E | E | E | E | E | -| [TiFlash Query Result Materialization](/tiflash/tiflash-results-materialization.md) | Y | Y | Y | Y | E | N | N | N | N | N | +| SQL statements [^3] | 8.3 | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | +|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| +| Basic `SELECT`, `INSERT`, `UPDATE`, `DELETE`, `REPLACE` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| `INSERT ON DUPLICATE KEY UPDATE` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| `LOAD DATA INFILE` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| `SELECT INTO OUTFILE` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| `INNER JOIN`, LEFT\|RIGHT [OUTER] JOIN | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| `UNION`, `UNION ALL` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [`EXCEPT` and `INTERSECT` operators](/functions-and-operators/set-operators.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| `GROUP BY`, `ORDER BY` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [`GROUP BY` modifier](/functions-and-operators/group-by-modifier.md) | Y | Y | Y | Y | N | N | N | N | N | N | N | +| [Window Functions](/functions-and-operators/window-functions.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Common Table Expressions (CTE)](/sql-statements/sql-statement-with.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| `START TRANSACTION`, `COMMIT`, `ROLLBACK` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [`EXPLAIN`](/sql-statements/sql-statement-explain.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [User-defined variables](/user-defined-variables.md) | E | E | E | E | E | E | E | E | E | E | E | +| [`BATCH [ON COLUMN] LIMIT INTEGER DELETE`](/sql-statements/sql-statement-batch.md) | Y | Y | Y | Y | Y | Y | Y | N | N | N | N | +| [`BATCH [ON COLUMN] LIMIT INTEGER INSERT/UPDATE/REPLACE`](/sql-statements/sql-statement-batch.md) | Y | Y | Y | Y | Y | Y | N | N | N | N | N | +| [`ALTER TABLE ... 
COMPACT`](/sql-statements/sql-statement-alter-table-compact.md) | Y | Y | Y | Y | Y | Y | E | N | N | N | N | +| [Table Lock](/sql-statements/sql-statement-lock-tables-and-unlock-tables.md) | E | E | E | E | E | E | E | E | E | E | E | +| [TiFlash Query Result Materialization](/tiflash/tiflash-results-materialization.md) | Y | Y | Y | Y | Y | E | N | N | N | N | N | ## Advanced SQL features -| Advanced SQL features | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | -|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| -| [Prepared statement cache](/sql-prepared-plan-cache.md) | Y | Y | Y | Y | Y | Y | Y | Y | E | E | -| [Non-prepared statement cache](/sql-non-prepared-plan-cache.md) | Y | Y | Y | E | N | N | N | N | N | N | -| [SQL binding](/sql-plan-management.md#sql-binding) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Cross-database binding](/sql-plan-management.md#cross-database-binding) | Y | Y | N | N | N | N | N | N | N | N | -| [Create bindings according to historical execution plans](/sql-plan-management.md#create-a-binding-according-to-a-historical-execution-plan) | Y | Y | Y | Y | E | N | N | N | N | N | -| [Coprocessor cache](/coprocessor-cache.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Stale Read](/stale-read.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Follower reads](/follower-read.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Read historical data (tidb_snapshot)](/read-historical-data.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Optimizer hints](/optimizer-hints.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [MPP execution engine](/explain-mpp.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [MPP execution engine - compression exchange](/explain-mpp.md#mpp-version-and-exchange-data-compression) | Y | Y | Y | Y | N | N | N | N | N | N | -| [TiFlash Pipeline Model](/tiflash/tiflash-pipeline-model.md) | Y | Y | Y | N | N | N | N | N | N | N | -| [TiFlash replica selection strategy](/system-variables.md#tiflash_replica_read-new-in-v730) | Y | Y | Y | N | N | N | N | N | N | N | -| [Index Merge](/explain-index-merge.md) | Y | Y | Y | Y | Y | Y | Y | E | E | E | -| [Placement Rules in SQL](/placement-rules-in-sql.md) | Y | Y | Y | Y | Y | Y | E | E | N | N | -| [Cascades Planner](/system-variables.md#tidb_enable_cascades_planner) | E | E | E | E | E | E | E | E | E | E | -| [Runtime Filter](/runtime-filter.md) | Y | Y | Y | N | N | N | N | N | N | N | +| Advanced SQL features | 8.3 | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | +|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| +| [Prepared statement cache](/sql-prepared-plan-cache.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | E | E | +| [Non-prepared statement cache](/sql-non-prepared-plan-cache.md) | Y | Y | Y | Y | E | N | N | N | N | N | N | +| [SQL binding](/sql-plan-management.md#sql-binding) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Cross-database binding](/sql-plan-management.md#cross-database-binding) | Y | Y | Y | N | N | N | N | N | N | N | N | +| [Create bindings according to historical execution plans](/sql-plan-management.md#create-a-binding-according-to-a-historical-execution-plan) | Y | Y | Y | Y | Y | E | N | N | N | N | N | +| [Coprocessor cache](/coprocessor-cache.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Stale Read](/stale-read.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Follower reads](/follower-read.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Read historical data 
(tidb_snapshot)](/read-historical-data.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Optimizer hints](/optimizer-hints.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [MPP execution engine](/explain-mpp.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [MPP execution engine - compression exchange](/explain-mpp.md#mpp-version-and-exchange-data-compression) | Y | Y | Y | Y | Y | N | N | N | N | N | N | +| [TiFlash Pipeline Model](/tiflash/tiflash-pipeline-model.md) | Y | Y | Y | Y | N | N | N | N | N | N | N | +| [TiFlash replica selection strategy](/system-variables.md#tiflash_replica_read-new-in-v730) | Y | Y | Y | Y | N | N | N | N | N | N | N | +| [Index Merge](/explain-index-merge.md) | Y | Y | Y | Y | Y | Y | Y | Y | E | E | E | +| [Placement Rules in SQL](/placement-rules-in-sql.md) | Y | Y | Y | Y | Y | Y | Y | E | E | N | N | +| [Cascades Planner](/system-variables.md#tidb_enable_cascades_planner) | E | E | E | E | E | E | E | E | E | E | E | +| [Runtime Filter](/runtime-filter.md) | Y | Y | Y | Y | N | N | N | N | N | N | N | ## Data definition language (DDL) -| Data definition language (DDL) | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | -|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| -| Basic `CREATE`, `DROP`, `ALTER`, `RENAME`, `TRUNCATE` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Generated columns](/generated-columns.md) | Y | Y | Y | Y | E | E | E | E | E | E | -| [Views](/views.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Sequences](/sql-statements/sql-statement-create-sequence.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Auto increment](/auto-increment.md) | Y | Y | Y | Y | Y[^4] | Y | Y | Y | Y | Y | -| [Auto random](/auto-random.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [TTL (Time to Live)](/time-to-live.md) | Y | Y | Y | Y | E | N | N | N | N | N | -| [DDL algorithm assertions](/sql-statements/sql-statement-alter-table.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| Multi-schema change: add columns | Y | Y | Y | Y | Y | E | E | E | E | E | -| [Change column type](/sql-statements/sql-statement-modify-column.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Temporary tables](/temporary-tables.md) | Y | Y | Y | Y | Y | Y | Y | Y | N | N | -| Concurrent DDL statements | Y | Y | Y | Y | Y | N | N | N | N | N | -| [Acceleration of `ADD INDEX` and `CREATE INDEX`](/system-variables.md#tidb_ddl_enable_fast_reorg-new-in-v630) | Y | Y | Y | Y | Y | N | N | N | N | N | -| [Metadata lock](/metadata-lock.md) | Y | Y | Y | Y | Y | N | N | N | N | N | -| [`FLASHBACK CLUSTER`](/sql-statements/sql-statement-flashback-cluster.md) | Y | Y | Y | Y | Y | N | N | N | N | N | -| [Pause](/sql-statements/sql-statement-admin-pause-ddl.md)/[Resume](/sql-statements/sql-statement-admin-resume-ddl.md) DDL | Y | Y | Y | N | N | N | N | N | N | N | -| [TiDB Accelerated Table Creation](/accelerated-table-creation.md) | E | E | N | N | N | N | N | N | N | N | -| [Configure BDR role to replicate DDL statements in BDR mode](/sql-statements/sql-statement-admin-bdr-role.md#admin-setshowunset-bdr-role) | E | E | N | N | N | N | N | N | N | N | +| Data definition language (DDL) | 8.3 | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | +|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| +| Basic `CREATE`, `DROP`, `ALTER`, `RENAME`, `TRUNCATE` | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Generated columns](/generated-columns.md) | Y | Y | Y | Y | Y | E | E | E | E | E | E | +| [Views](/views.md) | Y | Y | Y | 
Y | Y | Y | Y | Y | Y | Y | Y | +| [Sequences](/sql-statements/sql-statement-create-sequence.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Auto increment](/auto-increment.md) | Y | Y | Y | Y | Y | Y[^4] | Y | Y | Y | Y | Y | +| [Auto random](/auto-random.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [TTL (Time to Live)](/time-to-live.md) | Y | Y | Y | Y | Y | E | N | N | N | N | N | +| [DDL algorithm assertions](/sql-statements/sql-statement-alter-table.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| Multi-schema change: add columns | Y | Y | Y | Y | Y | Y | E | E | E | E | E | +| [Change column type](/sql-statements/sql-statement-modify-column.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Temporary tables](/temporary-tables.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | N | N | +| Concurrent DDL statements | Y | Y | Y | Y | Y | Y | N | N | N | N | N | +| [Acceleration of `ADD INDEX` and `CREATE INDEX`](/system-variables.md#tidb_ddl_enable_fast_reorg-new-in-v630) | Y | Y | Y | Y | Y | Y | N | N | N | N | N | +| [Metadata lock](/metadata-lock.md) | Y | Y | Y | Y | Y | Y | N | N | N | N | N | +| [`FLASHBACK CLUSTER`](/sql-statements/sql-statement-flashback-cluster.md) | Y | Y | Y | Y | Y | Y | N | N | N | N | N | +| [Pause](/sql-statements/sql-statement-admin-pause-ddl.md)/[Resume](/sql-statements/sql-statement-admin-resume-ddl.md) DDL | Y | Y | Y | Y | N | N | N | N | N | N | N | +| [TiDB Accelerated Table Creation](/accelerated-table-creation.md) | E | E | E | N | N | N | N | N | N | N | N | +| [Configure BDR role to replicate DDL statements in BDR mode](/sql-statements/sql-statement-admin-bdr-role.md#admin-setshowunset-bdr-role) | Y | E | E | N | N | N | N | N | N | N | N | ## Transactions -| Transactions | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | -|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| -| [Async commit](/system-variables.md#tidb_enable_async_commit-new-in-v50) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [1PC](/system-variables.md#tidb_enable_1pc-new-in-v50) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Large transactions (10GB)](/transaction-overview.md#transaction-size-limit) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Pessimistic transactions](/pessimistic-transaction.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Optimistic transactions](/optimistic-transaction.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Repeatable-read isolation (snapshot isolation)](/transaction-isolation-levels.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Read-committed isolation](/transaction-isolation-levels.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Automatically terminating long-running idle transactions](/system-variables.md#tidb_idle_transaction_timeout-new-in-v760) | Y | Y | N | N | N | N | N | N | N | N | +| Transactions | 8.3 | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | +|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| +| [Async commit](/system-variables.md#tidb_enable_async_commit-new-in-v50) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [1PC](/system-variables.md#tidb_enable_1pc-new-in-v50) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Large transactions (10GB)](/transaction-overview.md#transaction-size-limit) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Pessimistic transactions](/pessimistic-transaction.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Optimistic transactions](/optimistic-transaction.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| 
[Repeatable-read isolation (snapshot isolation)](/transaction-isolation-levels.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Read-committed isolation](/transaction-isolation-levels.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Automatically terminating long-running idle transactions](/system-variables.md#tidb_idle_transaction_timeout-new-in-v760) | Y | Y | Y | N | N | N | N | N | N | N | N | ## Partitioning -| Partitioning | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | -|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| -| [Range partitioning](/partitioned-table.md#range-partitioning) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Hash partitioning](/partitioned-table.md#hash-partitioning) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Key partitioning](/partitioned-table.md#key-partitioning) | Y | Y | Y | Y | N | N | N | N | N | N | -| [List partitioning](/partitioned-table.md#list-partitioning) | Y | Y | Y | Y | Y | Y | E | E | E | E | -| [List COLUMNS partitioning](/partitioned-table.md) | Y | Y | Y | Y | Y | Y | E | E | E | E | -| [Default partition for List and List COLUMNS partitioned tables](/partitioned-table.md#default-list-partition) | Y | Y | Y | N | N | N | N | N | N | N | -| [`EXCHANGE PARTITION`](/partitioned-table.md) | Y | Y | Y | Y | Y | E | E | E | E | E | -| [`REORGANIZE PARTITION`](/partitioned-table.md#reorganize-partitions) | Y | Y | Y | Y | N | N | N | N | N | N | -| [`COALESCE PARTITION`](/partitioned-table.md#decrease-the-number-of-partitions) | Y | Y | Y | Y | N | N | N | N | N | N | -| [Dynamic pruning](/partitioned-table.md#dynamic-pruning-mode) | Y | Y | Y | Y | Y | Y | E | E | E | E | -| [Range COLUMNS partitioning](/partitioned-table.md#range-columns-partitioning) | Y | Y | Y | Y | Y | N | N | N | N | N | -| [Range INTERVAL partitioning](/partitioned-table.md#range-interval-partitioning) | Y | Y | Y | Y | E | N | N | N | N | N | -| [Convert a partitioned table to a non-partitioned table](/partitioned-table.md#convert-a-partitioned-table-to-a-non-partitioned-table) | Y | Y | Y | N | N | N | N | N | N | N | -| [Partition an existing table](/partitioned-table.md#partition-an-existing-table) | Y | Y | Y | N | N | N | N | N | N | N | +| Partitioning | 8.3 | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | +|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| +| [Range partitioning](/partitioned-table.md#range-partitioning) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Hash partitioning](/partitioned-table.md#hash-partitioning) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Key partitioning](/partitioned-table.md#key-partitioning) | Y | Y | Y | Y | Y | N | N | N | N | N | N | +| [List partitioning](/partitioned-table.md#list-partitioning) | Y | Y | Y | Y | Y | Y | Y | E | E | E | E | +| [List COLUMNS partitioning](/partitioned-table.md) | Y | Y | Y | Y | Y | Y | Y | E | E | E | E | +| [Default partition for List and List COLUMNS partitioned tables](/partitioned-table.md#default-list-partition) | Y | Y | Y | Y | N | N | N | N | N | N | N | +| [`EXCHANGE PARTITION`](/partitioned-table.md) | Y | Y | Y | Y | Y | Y | E | E | E | E | E | +| [`REORGANIZE PARTITION`](/partitioned-table.md#reorganize-partitions) | Y | Y | Y | Y | Y | N | N | N | N | N | N | +| [`COALESCE PARTITION`](/partitioned-table.md#decrease-the-number-of-partitions) | Y | Y | Y | Y | Y | N | N | N | N | N | N | +| [Dynamic pruning](/partitioned-table.md#dynamic-pruning-mode) | Y | Y | Y | Y | Y | Y | Y | E | E | E | E | 
+| [Range COLUMNS partitioning](/partitioned-table.md#range-columns-partitioning) | Y | Y | Y | Y | Y | Y | N | N | N | N | N | +| [Range INTERVAL partitioning](/partitioned-table.md#range-interval-partitioning) | Y | Y | Y | Y | Y | E | N | N | N | N | N | +| [Convert a partitioned table to a non-partitioned table](/partitioned-table.md#convert-a-partitioned-table-to-a-non-partitioned-table) | Y | Y | Y | Y | N | N | N | N | N | N | N | +| [Partition an existing table](/partitioned-table.md#partition-an-existing-table) | Y | Y | Y | Y | N | N | N | N | N | N | N | ## Statistics -| Statistics | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | -|---|---|---|---|---|---|---|---|---|---|---| -| [CMSketch](/statistics.md) | Disabled by default | Disabled by default | Disabled by default | Disabled by default | Disabled by default | Disabled by default | Disabled by default | Disabled by default | Y | Y | -| [Histograms](/statistics.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Extended statistics](/extended-statistics.md) | E | E | E | E | E | E | E | E | E | E | -| Statistics feedback | N | N | N | N | N | Deprecated | Deprecated | E | E | E | -| [Automatically update statistics](/statistics.md#automatic-update) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Dynamic pruning](/partitioned-table.md#dynamic-pruning-mode) | Y | Y | Y | Y | Y | Y | E | E | E | E | -| [Collect statistics for `PREDICATE COLUMNS`](/statistics.md#collect-statistics-on-some-columns) | E | E | E | E | E | E | E | N | N | N | -| [Control the memory quota for collecting statistics](/statistics.md#the-memory-quota-for-collecting-statistics) | E | E | E | E | E | N | N | N | N | N | -| [Randomly sample about 10000 rows of data to quickly build statistics](/system-variables.md#tidb_enable_fast_analyze) | Deprecated | Deprecated | Deprecated | E | E | E | E | E | E | E | -| [Lock statistics](/statistics.md#lock-statistics) | Y | Y | Y | E | E | N | N | N | N | N | -| [Lightweight statistics initialization](/statistics.md#load-statistics) | Y | Y | Y | E | N | N | N | N | N | N | -| [Show the progress of collecting statistics](/sql-statements/sql-statement-show-analyze-status.md) | Y | Y | Y | N | N | N | N | N | N | N | +| Statistics | 8.3 | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | +|---|---|---|---|---|---|---|---|---|---|---|---| +| [CMSketch](/statistics.md) | Disabled by default | Disabled by default | Disabled by default | Disabled by default | Disabled by default | Disabled by default | Disabled by default | Disabled by default | Disabled by default | Y | Y | +| [Histograms](/statistics.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Extended statistics](/extended-statistics.md) | E | E | E | E | E | E | E | E | E | E | E | +| Statistics feedback | N | N | N | N | N | N | Deprecated | Deprecated | E | E | E | +| [Automatically update statistics](/statistics.md#automatic-update) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Dynamic pruning](/partitioned-table.md#dynamic-pruning-mode) | Y | Y | Y | Y | Y | Y | Y | E | E | E | E | +| [Collect statistics for `PREDICATE COLUMNS`](/statistics.md#collect-statistics-on-some-columns) | Y | E | E | E | E | E | E | E | N | N | N | +| [Control the memory quota for collecting statistics](/statistics.md#the-memory-quota-for-collecting-statistics) | E | E | E | E | E | E | N | N | N | N | N | +| [Randomly sample about 10000 rows of data to quickly build statistics](/system-variables.md#tidb_enable_fast_analyze) | Deprecated | Deprecated | 
Deprecated | Deprecated | E | E | E | E | E | E | E | +| [Lock statistics](/statistics.md#lock-statistics) | Y | Y | Y | Y | E | E | N | N | N | N | N | +| [Lightweight statistics initialization](/statistics.md#load-statistics) | Y | Y | Y | Y | E | N | N | N | N | N | N | +| [Show the progress of collecting statistics](/sql-statements/sql-statement-show-analyze-status.md) | Y | Y | Y | Y | N | N | N | N | N | N | N | ## Security -| Security | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | -|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| -| [Transparent layer security (TLS)](/enable-tls-between-clients-and-servers.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Encryption at rest (TDE)](/encryption-at-rest.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Role-based authentication (RBAC)](/role-based-access-control.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Certificate-based authentication](/certificate-authentication.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [`caching_sha2_password` authentication](/system-variables.md#default_authentication_plugin) | Y | Y | Y | Y | Y | Y | Y | Y | Y | N | -| [`tidb_sm3_password` authentication](/system-variables.md#default_authentication_plugin) | Y | Y | Y | Y | Y | N | N | N | N | N | -| [`tidb_auth_token` authentication](/security-compatibility-with-mysql.md#tidb_auth_token) | Y | Y | Y | Y | Y | N | N | N | N | N | -| [`authentication_ldap_sasl` authentication](/system-variables.md#default_authentication_plugin) | Y | Y | Y | N | N | N | N | N | N | N | -| [`authentication_ldap_simple` authentication](/system-variables.md#default_authentication_plugin) | Y | Y | Y | Y | N | N | N | N | N | N | -| [Password management](/password-management.md) | Y | Y | Y | Y | Y | N | N | N | N | N | -| [MySQL compatible `GRANT` system](/privilege-management.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Dynamic Privileges](/privilege-management.md#dynamic-privileges) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Security Enhanced Mode](/system-variables.md#tidb_enable_enhanced_security) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Redacted Log Files](/log-redaction.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| Security | 8.3 | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | +|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| +| [Transparent layer security (TLS)](/enable-tls-between-clients-and-servers.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Encryption at rest (TDE)](/encryption-at-rest.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Role-based authentication (RBAC)](/role-based-access-control.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Certificate-based authentication](/certificate-authentication.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [`caching_sha2_password` authentication](/system-variables.md#default_authentication_plugin) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | N | +| [`tidb_sm3_password` authentication](/system-variables.md#default_authentication_plugin) | Y | Y | Y | Y | Y | Y | N | N | N | N | N | +| [`tidb_auth_token` authentication](/security-compatibility-with-mysql.md#tidb_auth_token) | Y | Y | Y | Y | Y | Y | N | N | N | N | N | +| [`authentication_ldap_sasl` authentication](/system-variables.md#default_authentication_plugin) | Y | Y | Y | Y | N | N | N | N | N | N | N | +| [`authentication_ldap_simple` authentication](/system-variables.md#default_authentication_plugin) | Y | Y | Y | Y | Y | N | N | N | N | N | N | +| 
[Password management](/password-management.md) | Y | Y | Y | Y | Y | Y | N | N | N | N | N | +| [MySQL compatible `GRANT` system](/privilege-management.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Dynamic Privileges](/privilege-management.md#dynamic-privileges) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Security Enhanced Mode](/system-variables.md#tidb_enable_enhanced_security) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Redacted Log Files](/log-redaction.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | ## Data import and export -| Data import and export | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | -|---|---|---|---|---|---|---|---|---|---|---| -| [Fast import using TiDB Lightning](/tidb-lightning/tidb-lightning-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Fast import using the `IMPORT INTO` statement](/sql-statements/sql-statement-import-into.md) | Y | Y | Y | N | N | N | N | N | N | N | -| mydumper logical dumper | Deprecated | Deprecated | Deprecated | Deprecated | Deprecated | Deprecated | Deprecated | Deprecated | Deprecated | Deprecated | -| [Dumpling logical dumper](/dumpling-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Transactional `LOAD DATA`](/sql-statements/sql-statement-load-data.md) [^5] | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Database migration toolkit (DM)](/migration-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) [^6] | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Change data capture (CDC)](/ticdc/ticdc-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Stream data to Amazon S3, GCS, Azure Blob Storage, and NFS through TiCDC](/ticdc/ticdc-sink-to-cloud-storage.md) | Y | Y | Y | Y | E | N | N | N | N | N | -| [TiCDC supports bidirectional replication between two TiDB clusters](/ticdc/ticdc-bidirectional-replication.md) | Y | Y | Y | Y | Y | N | N | N | N | N | -| [TiCDC OpenAPI v2](/ticdc/ticdc-open-api-v2.md) | Y | Y | Y | Y | N | N | N | N | N | N | -| [DM](/dm/dm-overview.md) supports migrating MySQL 8.0 | Y | Y | E | E | E | E | N | N | N | N | +| Data import and export | 8.3 | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | +|---|---|---|---|---|---|---|---|---|---|---|---| +| [Fast import using TiDB Lightning](/tidb-lightning/tidb-lightning-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Fast import using the `IMPORT INTO` statement](/sql-statements/sql-statement-import-into.md) | Y | Y | Y | Y | N | N | N | N | N | N | N | +| mydumper logical dumper | Deprecated | Deprecated | Deprecated | Deprecated | Deprecated | Deprecated | Deprecated | Deprecated | Deprecated | Deprecated | Deprecated | +| [Dumpling logical dumper](/dumpling-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Transactional `LOAD DATA`](/sql-statements/sql-statement-load-data.md) [^5] | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Database migration toolkit (DM)](/migration-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) [^6] | Deprecated | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Change data capture (CDC)](/ticdc/ticdc-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Stream data to Amazon S3, GCS, Azure Blob Storage, and NFS through TiCDC](/ticdc/ticdc-sink-to-cloud-storage.md) | Y | Y | Y | Y | Y | E | N | N | N | N | N | +| [TiCDC supports bidirectional replication between two TiDB 
clusters](/ticdc/ticdc-bidirectional-replication.md) | Y | Y | Y | Y | Y | Y | N | N | N | N | N | +| [TiCDC OpenAPI v2](/ticdc/ticdc-open-api-v2.md) | Y | Y | Y | Y | Y | N | N | N | N | N | N | +| [DM](/dm/dm-overview.md) supports migrating MySQL 8.0 | Y | Y | Y | E | E | E | E | N | N | N | N | ## Management, observability, and tools -| Management, observability, and tools | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | -|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| -| [TiDB Dashboard UI](/dashboard/dashboard-intro.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [TiDB Dashboard Continuous Profiling](/dashboard/continuous-profiling.md) | Y | Y | Y | Y | Y | Y | E | E | N | N | -| [TiDB Dashboard Top SQL](/dashboard/top-sql.md) | Y | Y | Y | Y | Y | Y | E | N | N | N | -| [TiDB Dashboard SQL Diagnostics](/information-schema/information-schema-sql-diagnostics.md) | Y | Y | Y | Y | Y | E | E | E | E | E | -| [TiDB Dashboard Cluster Diagnostics](/dashboard/dashboard-diagnostics-access.md) | Y | Y | Y | Y | Y | E | E | E | E | E | -| [TiKV-FastTune dashboard](/grafana-tikv-dashboard.md#tikv-fasttune-dashboard) | E | E | E | E | E | E | E | E | E | E | -| [Information schema](/information-schema/information-schema.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Metrics schema](/metrics-schema.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Statements summary tables](/statement-summary-tables.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Statements summary tables - summary persistence](/statement-summary-tables.md#persist-statements-summary) | E | E | E | E | N | N | N | N | N | N | -| [Slow query log](/identify-slow-queries.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [TiUP deployment](/tiup/tiup-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Kubernetes operator](https://docs.pingcap.com/tidb-in-kubernetes/) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Built-in physical backup](/br/backup-and-restore-use-cases.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [Global Kill](/sql-statements/sql-statement-kill.md) | Y | Y | Y | Y | Y | Y | E | E | E | E | -| [Lock View](/information-schema/information-schema-data-lock-waits.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | E | -| [`SHOW CONFIG`](/sql-statements/sql-statement-show-config.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | -| [`SET CONFIG`](/dynamic-config.md) | Y | Y | Y | Y | Y | Y | E | E | E | E | -| [DM WebUI](/dm/dm-webui-guide.md) | E | E | E | E | E | E | N | N | N | N | -| [Foreground Quota Limiter](/tikv-configuration-file.md#foreground-quota-limiter) | Y | Y | Y | Y | Y | E | N | N | N | N | -| [Background Quota Limiter](/tikv-configuration-file.md#background-quota-limiter) | E | E | E | E | E | N | N | N | N | N | -| [EBS volume snapshot backup and restore](https://docs.pingcap.com/tidb-in-kubernetes/v1.4/backup-to-aws-s3-by-snapshot) | Y | Y | Y | Y | Y | N | N | N | N | N | -| [PITR](/br/backup-and-restore-overview.md) | Y | Y | Y | Y | Y | N | N | N | N | N | -| [Global memory control](/configure-memory-usage.md#configure-the-memory-usage-threshold-of-a-tidb-server-instance) | Y | Y | Y | Y | Y | N | N | N | N | N | -| [Cross-cluster RawKV replication](/tikv-configuration-file.md#api-version-new-in-v610) | E | E | E | E | E | N | N | N | N | N | -| [Green GC](/system-variables.md#tidb_gc_scan_lock_mode-new-in-v50) | E | E | E | E | E | E | E | E | E | E | -| [Resource control](/tidb-resource-control.md) | Y | Y | Y | Y | N | N | N | N | N | N | -| [Runaway Queries 
management](/tidb-resource-control.md#manage-queries-that-consume-more-resources-than-expected-runaway-queries) | Y | Y | E | N | N | N | N | N | N | N | -| [Background tasks management](/tidb-resource-control.md#manage-background-tasks) | E | E | E | N | N | N | N | N | N | N | -| [TiFlash Disaggregated Storage and Compute Architecture and S3 Support](/tiflash/tiflash-disaggregated-and-s3.md) | Y | Y | Y | E | N | N | N | N | N | N | +| Management, observability, and tools | 8.3 | 8.2 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | 5.3 | 5.2 | 5.1 | +|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| +| [TiDB Dashboard UI](/dashboard/dashboard-intro.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [TiDB Dashboard Continuous Profiling](/dashboard/continuous-profiling.md) | Y | Y | Y | Y | Y | Y | Y | E | E | N | N | +| [TiDB Dashboard Top SQL](/dashboard/top-sql.md) | Y | Y | Y | Y | Y | Y | Y | E | N | N | N | +| [TiDB Dashboard SQL Diagnostics](/information-schema/information-schema-sql-diagnostics.md) | Y | Y | Y | Y | Y | Y | E | E | E | E | E | +| [TiDB Dashboard Cluster Diagnostics](/dashboard/dashboard-diagnostics-access.md) | Y | Y | Y | Y | Y | Y | E | E | E | E | E | +| [TiKV-FastTune dashboard](/grafana-tikv-dashboard.md#tikv-fasttune-dashboard) | E | E | E | E | E | E | E | E | E | E | E | +| [Information schema](/information-schema/information-schema.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Metrics schema](/metrics-schema.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Statements summary tables](/statement-summary-tables.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Statements summary tables - summary persistence](/statement-summary-tables.md#persist-statements-summary) | E | E | E | E | E | N | N | N | N | N | N | +| [Slow query log](/identify-slow-queries.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [TiUP deployment](/tiup/tiup-overview.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Kubernetes operator](https://docs.pingcap.com/tidb-in-kubernetes/) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Built-in physical backup](/br/backup-and-restore-use-cases.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [Global Kill](/sql-statements/sql-statement-kill.md) | Y | Y | Y | Y | Y | Y | Y | E | E | E | E | +| [Lock View](/information-schema/information-schema-data-lock-waits.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | E | +| [`SHOW CONFIG`](/sql-statements/sql-statement-show-config.md) | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | Y | +| [`SET CONFIG`](/dynamic-config.md) | Y | Y | Y | Y | Y | Y | Y | E | E | E | E | +| [DM WebUI](/dm/dm-webui-guide.md) | E | E | E | E | E | E | E | N | N | N | N | +| [Foreground Quota Limiter](/tikv-configuration-file.md#foreground-quota-limiter) | Y | Y | Y | Y | Y | Y | E | N | N | N | N | +| [Background Quota Limiter](/tikv-configuration-file.md#background-quota-limiter) | E | E | E | E | E | E | N | N | N | N | N | +| [EBS volume snapshot backup and restore](https://docs.pingcap.com/tidb-in-kubernetes/v1.4/backup-to-aws-s3-by-snapshot) | Y | Y | Y | Y | Y | Y | N | N | N | N | N | +| [PITR](/br/backup-and-restore-overview.md) | Y | Y | Y | Y | Y | Y | N | N | N | N | N | +| [Global memory control](/configure-memory-usage.md#configure-the-memory-usage-threshold-of-a-tidb-server-instance) | Y | Y | Y | Y | Y | Y | N | N | N | N | N | +| [Cross-cluster RawKV replication](/tikv-configuration-file.md#api-version-new-in-v610) | E | E | E | E | E | E | N | N | N | N | N | +| [Green 
GC](/system-variables.md#tidb_gc_scan_lock_mode-new-in-v50) | E | E | E | E | E | E | E | E | E | E | E | +| [Resource control](/tidb-resource-control.md) | Y | Y | Y | Y | Y | N | N | N | N | N | N | +| [Runaway Queries management](/tidb-resource-control.md#manage-queries-that-consume-more-resources-than-expected-runaway-queries) | Y | Y | Y | E | N | N | N | N | N | N | N | +| [Background tasks management](/tidb-resource-control.md#manage-background-tasks) | E | E | E | E | N | N | N | N | N | N | N | +| [TiFlash Disaggregated Storage and Compute Architecture and S3 Support](/tiflash/tiflash-disaggregated-and-s3.md) | Y | Y | Y | Y | E | N | N | N | N | N | N | | [Selecting TiDB nodes for the Distributed eXecution Framework (DXF) tasks](/system-variables.md#tidb_service_scope-new-in-v740) | Y | Y | Y | N | N | N | N | N | N | N | -| PD Follower Proxy (controlled by [`tidb_enable_tso_follower_proxy`](/system-variables.md#tidb_enable_tso_follower_proxy-new-in-v530)) | Y | Y | Y | Y | Y | Y | Y | Y | N | N | -| [Active PD Follower](/tune-region-performance.md#use-the-active-pd-follower-feature-to-enhance-the-scalability-of-pds-region-information-query-service) (controlled by [`pd_enable_follower_handle_region`](/system-variables.md#pd_enable_follower_handle_region-new-in-v760)) | E | E | N | N | N | N | N | N | N | N | -| [PD microservices](/pd-microservices.md) | E | E | N | N | N | N | N | N | N | N | -| [TiDB Distributed eXecution Framework (DXF)](/tidb-distributed-execution-framework.md) | Y | Y | Y | E | N | N | N | N | N | N | -| [Global Sort](/tidb-global-sort.md) | Y | Y | E | N | N | N | N | N | N | N | -| [TiProxy](/tiproxy/tiproxy-overview.md) | Y | Y | N | N | N | N | N | N | N | N | +| PD Follower Proxy (controlled by [`tidb_enable_tso_follower_proxy`](/system-variables.md#tidb_enable_tso_follower_proxy-new-in-v530)) | Y | Y | Y | Y | Y | Y | Y | Y | Y | N | N | +| [Active PD Follower](/tune-region-performance.md#use-the-active-pd-follower-feature-to-enhance-the-scalability-of-pds-region-information-query-service) (controlled by [`pd_enable_follower_handle_region`](/system-variables.md#pd_enable_follower_handle_region-new-in-v760)) | E | E | E | N | N | N | N | N | N | N | N | +| [PD microservices](/pd-microservices.md) | E | E | E | N | N | N | N | N | N | N | N | +| [TiDB Distributed eXecution Framework (DXF)](/tidb-distributed-execution-framework.md) | Y | Y | Y | Y | E | N | N | N | N | N | N | +| [Global Sort](/tidb-global-sort.md) | Y | Y | Y | E | N | N | N | N | N | N | N | +| [TiProxy](/tiproxy/tiproxy-overview.md) | Y | Y | Y | N | N | N | N | N | N | N | N | [^1]: TiDB incorrectly treats latin1 as a subset of utf8. See [TiDB #18955](https://github.com/pingcap/tidb/issues/18955) for more details. 
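
The compatibility matrices in the patch above are keyed by release train (8.3, 8.2, ...), and the next patch in this series bumps every example to v8.3.0. A minimal sketch of how a reader might confirm which column applies to a running cluster, assuming nothing beyond a reachable TiDB endpoint (the same `TIDB_VERSION()` output appears verbatim in the `tidb-functions.md` hunk further below):

```sql
-- Minimal sketch: read the release string of the connected TiDB server
-- and match it against the version columns in the feature tables above.

-- Full build info, e.g. "Release Version: v8.3.0":
SELECT TIDB_VERSION()\G

-- MySQL-style version string, e.g. "8.0.11-TiDB-v8.3.0":
SELECT VERSION();
```
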
From 20f9dd18523353d845af96cd0e7de7a2e75081d7 Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Thu, 22 Aug 2024 11:22:43 +0800 Subject: [PATCH 42/44] v8.3: bump the latest TiDB version to v8.3 (#18626) --- br/backup-and-restore-use-cases.md | 6 ++-- ...-guide-sample-application-nodejs-mysql2.md | 2 +- ...guide-sample-application-nodejs-mysqljs.md | 2 +- ...-guide-sample-application-nodejs-prisma.md | 2 +- ...guide-sample-application-nodejs-typeorm.md | 2 +- ...ev-guide-sample-application-ruby-mysql2.md | 2 +- ...dev-guide-sample-application-ruby-rails.md | 2 +- dm/maintain-dm-using-tiup.md | 2 +- dm/quick-start-create-task.md | 2 +- download-ecosystem-tools.md | 2 +- functions-and-operators/tidb-functions.md | 2 +- .../information-schema-tidb-servers-info.md | 2 +- pd-control.md | 2 +- ...erformance-schema-session-connect-attrs.md | 2 +- post-installation-check.md | 2 +- production-deployment-using-tiup.md | 6 ++-- quick-start-with-tidb.md | 10 +++--- scale-microservices-using-tiup.md | 2 +- scale-tidb-using-tiup.md | 2 +- system-variables.md | 2 +- ticdc/deploy-ticdc.md | 4 +-- ticdc/ticdc-changefeed-config.md | 2 +- ticdc/ticdc-changefeed-overview.md | 2 +- ticdc/ticdc-manage-changefeed.md | 2 +- ticdc/ticdc-open-api-v2.md | 2 +- ticdc/ticdc-sink-to-cloud-storage.md | 2 +- ticdc/ticdc-sink-to-pulsar.md | 2 +- tidb-binlog/get-started-with-tidb-binlog.md | 2 +- tidb-monitoring-api.md | 2 +- tiflash/create-tiflash-replicas.md | 4 +-- tiup/tiup-cluster.md | 30 ++++++++-------- tiup/tiup-component-cluster-deploy.md | 2 +- tiup/tiup-component-cluster-patch.md | 2 +- tiup/tiup-component-cluster-upgrade.md | 2 +- tiup/tiup-component-dm-upgrade.md | 2 +- tiup/tiup-component-management.md | 12 +++---- tiup/tiup-mirror.md | 6 ++-- tiup/tiup-playground.md | 4 +-- upgrade-monitoring-services.md | 4 +-- upgrade-tidb-using-tiup.md | 36 +++++++++---------- 40 files changed, 90 insertions(+), 90 deletions(-) diff --git a/br/backup-and-restore-use-cases.md b/br/backup-and-restore-use-cases.md index ae2a7b521b461..0634e2d34393b 100644 --- a/br/backup-and-restore-use-cases.md +++ b/br/backup-and-restore-use-cases.md @@ -17,7 +17,7 @@ With PITR, you can satisfy the preceding requirements. ## Deploy the TiDB cluster and BR -To use PITR, you need to deploy a TiDB cluster >= v6.2.0 and update BR to the same version as the TiDB cluster. This document uses v8.2.0 as an example. +To use PITR, you need to deploy a TiDB cluster >= v6.2.0 and update BR to the same version as the TiDB cluster. This document uses v8.3.0 as an example. The following table shows the recommended hardware resources for using PITR in a TiDB cluster. @@ -44,13 +44,13 @@ Install or upgrade BR using TiUP: - Install: ```shell - tiup install br:v8.2.0 + tiup install br:v8.3.0 ``` - Upgrade: ```shell - tiup update br:v8.2.0 + tiup update br:v8.3.0 ``` ## Configure backup storage (Amazon S3) diff --git a/develop/dev-guide-sample-application-nodejs-mysql2.md b/develop/dev-guide-sample-application-nodejs-mysql2.md index ba5fa6a2b98d1..88585ed59c314 100644 --- a/develop/dev-guide-sample-application-nodejs-mysql2.md +++ b/develop/dev-guide-sample-application-nodejs-mysql2.md @@ -189,7 +189,7 @@ npm start If the connection is successful, the console will output the version of the TiDB cluster as follows: ``` -🔌 Connected to TiDB cluster! (TiDB version: 8.0.11-TiDB-v8.2.0) +🔌 Connected to TiDB cluster! (TiDB version: 8.0.11-TiDB-v8.3.0) ⏳ Loading sample game data... ✅ Loaded sample game data. 
diff --git a/develop/dev-guide-sample-application-nodejs-mysqljs.md b/develop/dev-guide-sample-application-nodejs-mysqljs.md index 46ae08c3dd3cd..a092da1b127bb 100644 --- a/develop/dev-guide-sample-application-nodejs-mysqljs.md +++ b/develop/dev-guide-sample-application-nodejs-mysqljs.md @@ -189,7 +189,7 @@ npm start If the connection is successful, the console will output the version of the TiDB cluster as follows: ``` -🔌 Connected to TiDB cluster! (TiDB version: 8.0.11-TiDB-v8.2.0) +🔌 Connected to TiDB cluster! (TiDB version: 8.0.11-TiDB-v8.3.0) ⏳ Loading sample game data... ✅ Loaded sample game data. diff --git a/develop/dev-guide-sample-application-nodejs-prisma.md b/develop/dev-guide-sample-application-nodejs-prisma.md index 557a04eb6994a..08f7692d9534d 100644 --- a/develop/dev-guide-sample-application-nodejs-prisma.md +++ b/develop/dev-guide-sample-application-nodejs-prisma.md @@ -268,7 +268,7 @@ void main(); If the connection is successful, the terminal will output the version of the TiDB cluster as follows: ``` -🔌 Connected to TiDB cluster! (TiDB version: 8.0.11-TiDB-v8.2.0) +🔌 Connected to TiDB cluster! (TiDB version: 8.0.11-TiDB-v8.3.0) 🆕 Created a new player with ID 1. ℹ️ Got Player 1: Player { id: 1, coins: 100, goods: 100 } 🔢 Added 50 coins and 50 goods to player 1, now player 1 has 150 coins and 150 goods. diff --git a/develop/dev-guide-sample-application-nodejs-typeorm.md b/develop/dev-guide-sample-application-nodejs-typeorm.md index e2167e08fd83d..9c1ae1ec490ff 100644 --- a/develop/dev-guide-sample-application-nodejs-typeorm.md +++ b/develop/dev-guide-sample-application-nodejs-typeorm.md @@ -231,7 +231,7 @@ npm start If the connection is successful, the terminal will output the version of the TiDB cluster as follows: ``` -🔌 Connected to TiDB cluster! (TiDB version: 8.0.11-TiDB-v8.2.0) +🔌 Connected to TiDB cluster! (TiDB version: 8.0.11-TiDB-v8.3.0) 🆕 Created a new player with ID 2. ℹ️ Got Player 2: Player { id: 2, coins: 100, goods: 100 } 🔢 Added 50 coins and 50 goods to player 2, now player 2 has 100 coins and 150 goods. diff --git a/develop/dev-guide-sample-application-ruby-mysql2.md b/develop/dev-guide-sample-application-ruby-mysql2.md index 2baa615236866..80c685683d9bc 100644 --- a/develop/dev-guide-sample-application-ruby-mysql2.md +++ b/develop/dev-guide-sample-application-ruby-mysql2.md @@ -190,7 +190,7 @@ ruby app.rb If the connection is successful, the console will output the version of the TiDB cluster as follows: ``` -🔌 Connected to TiDB cluster! (TiDB version: 8.0.11-TiDB-v8.2.0) +🔌 Connected to TiDB cluster! (TiDB version: 8.0.11-TiDB-v8.3.0) ⏳ Loading sample game data... ✅ Loaded sample game data. diff --git a/develop/dev-guide-sample-application-ruby-rails.md b/develop/dev-guide-sample-application-ruby-rails.md index f0ca5c93a22ef..5e4dd284073a4 100644 --- a/develop/dev-guide-sample-application-ruby-rails.md +++ b/develop/dev-guide-sample-application-ruby-rails.md @@ -183,7 +183,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele If the connection is successful, the console will output the version of the TiDB cluster as follows: ``` -🔌 Connected to TiDB cluster! (TiDB version: 8.0.11-TiDB-v8.2.0) +🔌 Connected to TiDB cluster! (TiDB version: 8.0.11-TiDB-v8.3.0) ⏳ Loading sample game data... ✅ Loaded sample game data. 
diff --git a/dm/maintain-dm-using-tiup.md b/dm/maintain-dm-using-tiup.md index a87fdc77287d7..c11fc35781c13 100644 --- a/dm/maintain-dm-using-tiup.md +++ b/dm/maintain-dm-using-tiup.md @@ -394,7 +394,7 @@ All operations above performed on the cluster machine use the SSH client embedde Then you can use the `--native-ssh` command-line flag to enable the system-native command-line tool: -- Deploy a cluster: `tiup dm deploy --native-ssh`. Fill in the name of your cluster for ``, the DM version to be deployed (such as `v8.2.0`) for `` , and the topology file name for ``. +- Deploy a cluster: `tiup dm deploy --native-ssh`. Fill in the name of your cluster for ``, the DM version to be deployed (such as `v8.3.0`) for `` , and the topology file name for ``. - Start a cluster: `tiup dm start --native-ssh`. - Upgrade a cluster: `tiup dm upgrade ... --native-ssh` diff --git a/dm/quick-start-create-task.md b/dm/quick-start-create-task.md index 1db041365a013..6d7b364746842 100644 --- a/dm/quick-start-create-task.md +++ b/dm/quick-start-create-task.md @@ -74,7 +74,7 @@ To run a TiDB server, use the following command: {{< copyable "shell-regular" >}} ```bash -wget https://download.pingcap.org/tidb-community-server-v8.2.0-linux-amd64.tar.gz +wget https://download.pingcap.org/tidb-community-server-v8.3.0-linux-amd64.tar.gz tar -xzvf tidb-latest-linux-amd64.tar.gz mv tidb-latest-linux-amd64/bin/tidb-server ./ ./tidb-server diff --git a/download-ecosystem-tools.md b/download-ecosystem-tools.md index 4fd54458b72f2..f9ba8e4869119 100644 --- a/download-ecosystem-tools.md +++ b/download-ecosystem-tools.md @@ -28,7 +28,7 @@ You can download TiDB Toolkit from the following link: https://download.pingcap.org/tidb-community-toolkit-{version}-linux-{arch}.tar.gz ``` -`{version}` in the link indicates the version number of TiDB and `{arch}` indicates the architecture of the system, which can be `amd64` or `arm64`. For example, the download link for `v8.2.0` in the `amd64` architecture is `https://download.pingcap.org/tidb-community-toolkit-v8.2.0-linux-amd64.tar.gz`. +`{version}` in the link indicates the version number of TiDB and `{arch}` indicates the architecture of the system, which can be `amd64` or `arm64`. For example, the download link for `v8.3.0` in the `amd64` architecture is `https://download.pingcap.org/tidb-community-toolkit-v8.3.0-linux-amd64.tar.gz`. > **Note:** > diff --git a/functions-and-operators/tidb-functions.md b/functions-and-operators/tidb-functions.md index cde9c29586db6..481641ef2f849 100644 --- a/functions-and-operators/tidb-functions.md +++ b/functions-and-operators/tidb-functions.md @@ -544,7 +544,7 @@ SELECT TIDB_VERSION()\G ```sql *************************** 1. 
row *************************** -TIDB_VERSION(): Release Version: v8.2.0 +TIDB_VERSION(): Release Version: v8.3.0 Edition: Community Git Commit Hash: 821e491a20fbab36604b36b647b5bae26a2c1418 Git Branch: HEAD diff --git a/information-schema/information-schema-tidb-servers-info.md b/information-schema/information-schema-tidb-servers-info.md index cec98f2c73846..bbe2f20d8c88a 100644 --- a/information-schema/information-schema-tidb-servers-info.md +++ b/information-schema/information-schema-tidb-servers-info.md @@ -50,7 +50,7 @@ The output is as follows: PORT: 4000 STATUS_PORT: 10080 LEASE: 45s - VERSION: 8.0.11-TiDB-v8.2.0 + VERSION: 8.0.11-TiDB-v8.3.0 GIT_HASH: 827d8ff2d22ac4c93ae1b841b79d468211e1d393 BINLOG_STATUS: Off LABELS: diff --git a/pd-control.md b/pd-control.md index 44046e03a932b..84de713dd83ed 100644 --- a/pd-control.md +++ b/pd-control.md @@ -29,7 +29,7 @@ To obtain `pd-ctl` of the latest version, download the TiDB server installation > **Note:** > -> `{version}` in the link indicates the version number of TiDB. For example, the download link for `v8.2.0` in the `amd64` architecture is `https://download.pingcap.org/tidb-community-server-v8.2.0-linux-amd64.tar.gz`. +> `{version}` in the link indicates the version number of TiDB. For example, the download link for `v8.3.0` in the `amd64` architecture is `https://download.pingcap.org/tidb-community-server-v8.3.0-linux-amd64.tar.gz`. ### Compile from source code diff --git a/performance-schema/performance-schema-session-connect-attrs.md b/performance-schema/performance-schema-session-connect-attrs.md index e92a327d6adf0..3005f2ab1f9a6 100644 --- a/performance-schema/performance-schema-session-connect-attrs.md +++ b/performance-schema/performance-schema-session-connect-attrs.md @@ -52,7 +52,7 @@ TABLE SESSION_CONNECT_ATTRS; | PROCESSLIST_ID | ATTR_NAME | ATTR_VALUE | ORDINAL_POSITION | +----------------+-----------------+------------+------------------+ | 2097154 | _client_name | libmysql | 0 | -| 2097154 | _client_version | 8.2.0 | 1 | +| 2097154 | _client_version | 8.3.0 | 1 | | 2097154 | _os | Linux | 2 | | 2097154 | _pid | 1299203 | 3 | | 2097154 | _platform | x86_64 | 4 | diff --git a/post-installation-check.md b/post-installation-check.md index dc5ab632e9e46..f70aa09d95cb2 100644 --- a/post-installation-check.md +++ b/post-installation-check.md @@ -63,7 +63,7 @@ The following information indicates successful login: ```sql Welcome to the MySQL monitor. Commands end with ; or \g. Your MySQL connection id is 3 -Server version: 8.0.11-TiDB-v8.2.0 TiDB Server (Apache License 2.0) Community Edition, MySQL 8.0 compatible +Server version: 8.0.11-TiDB-v8.3.0 TiDB Server (Apache License 2.0) Community Edition, MySQL 8.0 compatible Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. Oracle is a registered trademark of Oracle Corporation and/or its affiliates. Other names may be trademarks of their respective diff --git a/production-deployment-using-tiup.md b/production-deployment-using-tiup.md index d7f81b361cfd7..77d5284431b5c 100644 --- a/production-deployment-using-tiup.md +++ b/production-deployment-using-tiup.md @@ -95,7 +95,7 @@ https://download.pingcap.org/tidb-community-toolkit-{version}-linux-{arch}.tar.g > **Tip:** > -> `{version}` in the link indicates the version number of TiDB and `{arch}` indicates the architecture of the system, which can be `amd64` or `arm64`. 
For example, the download link for `v8.2.0` in the `amd64` architecture is `https://download.pingcap.org/tidb-community-toolkit-v8.2.0-linux-amd64.tar.gz`. +> `{version}` in the link indicates the version number of TiDB and `{arch}` indicates the architecture of the system, which can be `amd64` or `arm64`. For example, the download link for `v8.3.0` in the `amd64` architecture is `https://download.pingcap.org/tidb-community-toolkit-v8.3.0-linux-amd64.tar.gz`. **Method 2**: Manually pack an offline component package using `tiup mirror clone`. The detailed steps are as follows: @@ -346,13 +346,13 @@ Before you run the `deploy` command, use the `check` and `check --apply` command {{< copyable "shell-regular" >}} ```shell - tiup cluster deploy tidb-test v8.2.0 ./topology.yaml --user root [-p] [-i /home/root/.ssh/gcp_rsa] + tiup cluster deploy tidb-test v8.3.0 ./topology.yaml --user root [-p] [-i /home/root/.ssh/gcp_rsa] ``` In the `tiup cluster deploy` command above: - `tidb-test` is the name of the TiDB cluster to be deployed. -- `v8.2.0` is the version of the TiDB cluster to be deployed. You can see the latest supported versions by running `tiup list tidb`. +- `v8.3.0` is the version of the TiDB cluster to be deployed. You can see the latest supported versions by running `tiup list tidb`. - `topology.yaml` is the initialization configuration file. - `--user root` indicates logging into the target machine as the `root` user to complete the cluster deployment. The `root` user is expected to have `ssh` and `sudo` privileges to the target machine. Alternatively, you can use other users with `ssh` and `sudo` privileges to complete the deployment. - `[-i]` and `[-p]` are optional. If you have configured login to the target machine without password, these parameters are not required. If not, choose one of the two parameters. `[-i]` is the private key of the root user (or other users specified by `--user`) that has access to the target machine. `[-p]` is used to input the user password interactively. diff --git a/quick-start-with-tidb.md b/quick-start-with-tidb.md index 6c522053dacd3..14d17ce0183ba 100644 --- a/quick-start-with-tidb.md +++ b/quick-start-with-tidb.md @@ -81,10 +81,10 @@ As a distributed system, a basic TiDB test cluster usually consists of 2 TiDB in {{< copyable "shell-regular" >}} ```shell - tiup playground v8.2.0 --db 2 --pd 3 --kv 3 + tiup playground v8.3.0 --db 2 --pd 3 --kv 3 ``` - The command downloads a version cluster to the local machine and starts it, such as v8.2.0. To view the latest version, run `tiup list tidb`. + The command downloads a version cluster to the local machine and starts it, such as v8.3.0. To view the latest version, run `tiup list tidb`. This command returns the access methods of the cluster: @@ -202,10 +202,10 @@ As a distributed system, a basic TiDB test cluster usually consists of 2 TiDB in {{< copyable "shell-regular" >}} ```shell - tiup playground v8.2.0 --db 2 --pd 3 --kv 3 + tiup playground v8.3.0 --db 2 --pd 3 --kv 3 ``` - The command downloads a version cluster to the local machine and starts it, such as v8.2.0. To view the latest version, run `tiup list tidb`. + The command downloads a version cluster to the local machine and starts it, such as v8.3.0. To view the latest version, run `tiup list tidb`. This command returns the access methods of the cluster: @@ -437,7 +437,7 @@ Other requirements for the target machine include: ``` - ``: Set the cluster name - - ``: Set the TiDB cluster version, such as `v8.2.0`. 
You can see all the supported TiDB versions by running the `tiup list tidb` command + - ``: Set the TiDB cluster version, such as `v8.3.0`. You can see all the supported TiDB versions by running the `tiup list tidb` command - `-p`: Specify the password used to connect to the target machine. > **Note:** diff --git a/scale-microservices-using-tiup.md b/scale-microservices-using-tiup.md index 7682ce9ad8c1a..e11c71d455456 100644 --- a/scale-microservices-using-tiup.md +++ b/scale-microservices-using-tiup.md @@ -129,7 +129,7 @@ Starting /root/.tiup/components/cluster/v1.16/cluster display TiDB Cluster: -TiDB Version: v8.2.0 +TiDB Version: v8.3.0 ID Role Host Ports Status Data Dir Deploy Dir diff --git a/scale-tidb-using-tiup.md b/scale-tidb-using-tiup.md index 39e856821c821..52cf7052c09cd 100644 --- a/scale-tidb-using-tiup.md +++ b/scale-tidb-using-tiup.md @@ -298,7 +298,7 @@ This section exemplifies how to remove a TiKV node from the `10.0.1.5` host. ``` Starting /root/.tiup/components/cluster/v1.12.3/cluster display TiDB Cluster: - TiDB Version: v8.2.0 + TiDB Version: v8.3.0 ID Role Host Ports Status Data Dir Deploy Dir -- ---- ---- ----- ------ -------- ---------- 10.0.1.3:8300 cdc 10.0.1.3 8300 Up data/cdc-8300 deploy/cdc-8300 diff --git a/system-variables.md b/system-variables.md index fd12bdf445bc4..f3177221a3a39 100644 --- a/system-variables.md +++ b/system-variables.md @@ -6359,7 +6359,7 @@ Internally, the TiDB parser transforms the `SET TRANSACTION ISOLATION LEVEL [REA - Scope: NONE - Applies to hint [SET_VAR](/optimizer-hints.md#set_varvar_namevar_value): No - Default value: `8.0.11-TiDB-`(tidb version) -- This variable returns the MySQL version, followed by the TiDB version. For example '8.0.11-TiDB-v8.2.0'. +- This variable returns the MySQL version, followed by the TiDB version. For example '8.0.11-TiDB-v8.3.0'. ### version_comment diff --git a/ticdc/deploy-ticdc.md b/ticdc/deploy-ticdc.md index 0ec7ff2c1a78a..dcef00f6019d2 100644 --- a/ticdc/deploy-ticdc.md +++ b/ticdc/deploy-ticdc.md @@ -95,7 +95,7 @@ tiup cluster upgrade --transfer-timeout 600 > **Note:** > -> In the preceding command, you need to replace `` and `` with the actual cluster name and cluster version. For example, the version can be v8.2.0. +> In the preceding command, you need to replace `` and `` with the actual cluster name and cluster version. For example, the version can be v8.3.0. ### Upgrade cautions @@ -152,7 +152,7 @@ See [Enable TLS Between TiDB Components](/enable-tls-between-components.md). ## View TiCDC status using the command-line tool -Run the following command to view the TiCDC cluster status. Note that you need to replace `v` with the TiCDC cluster version, such as `v8.2.0`: +Run the following command to view the TiCDC cluster status. Note that you need to replace `v` with the TiCDC cluster version, such as `v8.3.0`: ```shell tiup cdc:v cli capture list --server=http://10.0.10.25:8300 diff --git a/ticdc/ticdc-changefeed-config.md b/ticdc/ticdc-changefeed-config.md index d535f5c01ed26..9de7c6bef5be8 100644 --- a/ticdc/ticdc-changefeed-config.md +++ b/ticdc/ticdc-changefeed-config.md @@ -16,7 +16,7 @@ cdc cli changefeed create --server=http://10.0.10.25:8300 --sink-uri="mysql://ro ```shell Create changefeed successfully! 
ID: simple-replication-task -Info: {"upstream_id":7178706266519722477,"namespace":"default","id":"simple-replication-task","sink_uri":"mysql://root:xxxxx@127.0.0.1:4000/?time-zone=","create_time":"2024-07-04T15:05:46.679218+08:00","start_ts":438156275634929669,"engine":"unified","config":{"case_sensitive":false,"enable_old_value":true,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":true,"bdr_mode":false,"sync_point_interval":30000000000,"sync_point_retention":3600000000000,"filter":{"rules":["test.*"],"event_filters":null},"mounter":{"worker_num":16},"sink":{"protocol":"","schema_registry":"","csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false},"column_selectors":null,"transaction_atomicity":"none","encoder_concurrency":16,"terminator":"\r\n","date_separator":"none","enable_partition_separator":false},"consistent":{"level":"none","max_log_size":64,"flush_interval":2000,"storage":""}},"state":"normal","creator_version":"v8.2.0"} +Info: {"upstream_id":7178706266519722477,"namespace":"default","id":"simple-replication-task","sink_uri":"mysql://root:xxxxx@127.0.0.1:4000/?time-zone=","create_time":"2024-08-22T15:05:46.679218+08:00","start_ts":438156275634929669,"engine":"unified","config":{"case_sensitive":false,"enable_old_value":true,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":true,"bdr_mode":false,"sync_point_interval":30000000000,"sync_point_retention":3600000000000,"filter":{"rules":["test.*"],"event_filters":null},"mounter":{"worker_num":16},"sink":{"protocol":"","schema_registry":"","csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false},"column_selectors":null,"transaction_atomicity":"none","encoder_concurrency":16,"terminator":"\r\n","date_separator":"none","enable_partition_separator":false},"consistent":{"level":"none","max_log_size":64,"flush_interval":2000,"storage":""}},"state":"normal","creator_version":"v8.3.0"} ``` - `--changefeed-id`: The ID of the replication task. The format must match the `^[a-zA-Z0-9]+(\-[a-zA-Z0-9]+)*$` regular expression. If this ID is not specified, TiCDC automatically generates a UUID (the version 4 format) as the ID. diff --git a/ticdc/ticdc-changefeed-overview.md b/ticdc/ticdc-changefeed-overview.md index 08591c8c12c07..5164f0e3c70b6 100644 --- a/ticdc/ticdc-changefeed-overview.md +++ b/ticdc/ticdc-changefeed-overview.md @@ -44,4 +44,4 @@ You can manage a TiCDC cluster and its replication tasks using the command-line You can also use the HTTP interface (the TiCDC OpenAPI feature) to manage a TiCDC cluster and its replication tasks. For details, see [TiCDC OpenAPI](/ticdc/ticdc-open-api.md). -If your TiCDC is deployed using TiUP, you can start `cdc cli` by running the `tiup cdc:v cli` command. Replace `v` with the TiCDC cluster version, such as `v8.2.0`. You can also run `cdc cli` directly. +If your TiCDC is deployed using TiUP, you can start `cdc cli` by running the `tiup cdc:v cli` command. Replace `v` with the TiCDC cluster version, such as `v8.3.0`. You can also run `cdc cli` directly. diff --git a/ticdc/ticdc-manage-changefeed.md b/ticdc/ticdc-manage-changefeed.md index 9ffdb3aa16998..560ca5b5457da 100644 --- a/ticdc/ticdc-manage-changefeed.md +++ b/ticdc/ticdc-manage-changefeed.md @@ -19,7 +19,7 @@ cdc cli changefeed create --server=http://10.0.10.25:8300 --sink-uri="mysql://ro ```shell Create changefeed successfully! 
ID: simple-replication-task -Info: {"upstream_id":7178706266519722477,"namespace":"default","id":"simple-replication-task","sink_uri":"mysql://root:xxxxx@127.0.0.1:4000/?time-zone=","create_time":"2024-07-04T15:05:46.679218+08:00","start_ts":438156275634929669,"engine":"unified","config":{"case_sensitive":false,"enable_old_value":true,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":true,"bdr_mode":false,"sync_point_interval":30000000000,"sync_point_retention":3600000000000,"filter":{"rules":["test.*"],"event_filters":null},"mounter":{"worker_num":16},"sink":{"protocol":"","schema_registry":"","csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false},"column_selectors":null,"transaction_atomicity":"none","encoder_concurrency":16,"terminator":"\r\n","date_separator":"none","enable_partition_separator":false},"consistent":{"level":"none","max_log_size":64,"flush_interval":2000,"storage":""}},"state":"normal","creator_version":"v8.2.0"} +Info: {"upstream_id":7178706266519722477,"namespace":"default","id":"simple-replication-task","sink_uri":"mysql://root:xxxxx@127.0.0.1:4000/?time-zone=","create_time":"2024-08-22T15:05:46.679218+08:00","start_ts":438156275634929669,"engine":"unified","config":{"case_sensitive":false,"enable_old_value":true,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":true,"bdr_mode":false,"sync_point_interval":30000000000,"sync_point_retention":3600000000000,"filter":{"rules":["test.*"],"event_filters":null},"mounter":{"worker_num":16},"sink":{"protocol":"","schema_registry":"","csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false},"column_selectors":null,"transaction_atomicity":"none","encoder_concurrency":16,"terminator":"\r\n","date_separator":"none","enable_partition_separator":false},"consistent":{"level":"none","max_log_size":64,"flush_interval":2000,"storage":""}},"state":"normal","creator_version":"v8.3.0"} ``` ## Query the replication task list diff --git a/ticdc/ticdc-open-api-v2.md b/ticdc/ticdc-open-api-v2.md index e9d3028a5e6f0..b6a1b24597d2f 100644 --- a/ticdc/ticdc-open-api-v2.md +++ b/ticdc/ticdc-open-api-v2.md @@ -92,7 +92,7 @@ curl -X GET http://127.0.0.1:8300/api/v2/status ```json { - "version": "v8.2.0", + "version": "v8.3.0", "git_hash": "10413bded1bdb2850aa6d7b94eb375102e9c44dc", "id": "d2912e63-3349-447c-90ba-72a4e04b5e9e", "pid": 1447, diff --git a/ticdc/ticdc-sink-to-cloud-storage.md b/ticdc/ticdc-sink-to-cloud-storage.md index 6d1faebb3b81a..627e47646f752 100644 --- a/ticdc/ticdc-sink-to-cloud-storage.md +++ b/ticdc/ticdc-sink-to-cloud-storage.md @@ -24,7 +24,7 @@ cdc cli changefeed create \ The output is as follows: ```shell -Info: 
{"upstream_id":7171388873935111376,"namespace":"default","id":"simple-replication-task","sink_uri":"s3://logbucket/storage_test?protocol=canal-json","create_time":"2024-07-04T18:52:05.566016967+08:00","start_ts":437706850431664129,"engine":"unified","config":{"case_sensitive":false,"enable_old_value":true,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":false,"sync_point_interval":600000000000,"sync_point_retention":86400000000000,"filter":{"rules":["*.*"],"event_filters":null},"mounter":{"worker_num":16},"sink":{"protocol":"canal-json","schema_registry":"","csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false},"column_selectors":null,"transaction_atomicity":"none","encoder_concurrency":16,"terminator":"\r\n","date_separator":"none","enable_partition_separator":false},"consistent":{"level":"none","max_log_size":64,"flush_interval":2000,"storage":""}},"state":"normal","creator_version":"v8.2.0"} +Info: {"upstream_id":7171388873935111376,"namespace":"default","id":"simple-replication-task","sink_uri":"s3://logbucket/storage_test?protocol=canal-json","create_time":"2024-08-22T18:52:05.566016967+08:00","start_ts":437706850431664129,"engine":"unified","config":{"case_sensitive":false,"enable_old_value":true,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":false,"sync_point_interval":600000000000,"sync_point_retention":86400000000000,"filter":{"rules":["*.*"],"event_filters":null},"mounter":{"worker_num":16},"sink":{"protocol":"canal-json","schema_registry":"","csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false},"column_selectors":null,"transaction_atomicity":"none","encoder_concurrency":16,"terminator":"\r\n","date_separator":"none","enable_partition_separator":false},"consistent":{"level":"none","max_log_size":64,"flush_interval":2000,"storage":""}},"state":"normal","creator_version":"v8.3.0"} ``` - `--server`: The address of any TiCDC server in the TiCDC cluster. diff --git a/ticdc/ticdc-sink-to-pulsar.md b/ticdc/ticdc-sink-to-pulsar.md index 1898e13f6c1c9..0c7c4bbf54681 100644 --- a/ticdc/ticdc-sink-to-pulsar.md +++ b/ticdc/ticdc-sink-to-pulsar.md @@ -23,7 +23,7 @@ cdc cli changefeed create \ Create changefeed successfully! 
ID: simple-replication-task -Info: {"upstream_id":7277814241002263370,"namespace":"default","id":"simple-replication-task","sink_uri":"pulsar://127.0.0.1:6650/consumer-test?protocol=canal-json","create_time":"2024-07-04T14:42:32.000904+08:00","start_ts":444203257406423044,"config":{"memory_quota":1073741824,"case_sensitive":false,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":false,"bdr_mode":false,"sync_point_interval":600000000000,"sync_point_retention":86400000000000,"filter":{"rules":["pulsar_test.*"]},"mounter":{"worker_num":16},"sink":{"protocol":"canal-json","csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false,"binary_encoding_method":"base64"},"dispatchers":[{"matcher":["pulsar_test.*"],"partition":"","topic":"test_{schema}_{table}"}],"encoder_concurrency":16,"terminator":"\r\n","date_separator":"day","enable_partition_separator":true,"only_output_updated_columns":false,"delete_only_output_handle_key_columns":false,"pulsar_config":{"connection-timeout":30,"operation-timeout":30,"batching-max-messages":1000,"batching-max-publish-delay":10,"send-timeout":30},"advance_timeout":150},"consistent":{"level":"none","max_log_size":64,"flush_interval":2000,"use_file_backend":false},"scheduler":{"enable_table_across_nodes":false,"region_threshold":100000,"write_key_threshold":0},"integrity":{"integrity_check_level":"none","corruption_handle_level":"warn"}},"state":"normal","creator_version":"v8.2.0","resolved_ts":444203257406423044,"checkpoint_ts":444203257406423044,"checkpoint_time":"2024-07-04 14:42:31.410"} +Info: {"upstream_id":7277814241002263370,"namespace":"default","id":"simple-replication-task","sink_uri":"pulsar://127.0.0.1:6650/consumer-test?protocol=canal-json","create_time":"2024-08-22T14:42:32.000904+08:00","start_ts":444203257406423044,"config":{"memory_quota":1073741824,"case_sensitive":false,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":false,"bdr_mode":false,"sync_point_interval":600000000000,"sync_point_retention":86400000000000,"filter":{"rules":["pulsar_test.*"]},"mounter":{"worker_num":16},"sink":{"protocol":"canal-json","csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false,"binary_encoding_method":"base64"},"dispatchers":[{"matcher":["pulsar_test.*"],"partition":"","topic":"test_{schema}_{table}"}],"encoder_concurrency":16,"terminator":"\r\n","date_separator":"day","enable_partition_separator":true,"only_output_updated_columns":false,"delete_only_output_handle_key_columns":false,"pulsar_config":{"connection-timeout":30,"operation-timeout":30,"batching-max-messages":1000,"batching-max-publish-delay":10,"send-timeout":30},"advance_timeout":150},"consistent":{"level":"none","max_log_size":64,"flush_interval":2000,"use_file_backend":false},"scheduler":{"enable_table_across_nodes":false,"region_threshold":100000,"write_key_threshold":0},"integrity":{"integrity_check_level":"none","corruption_handle_level":"warn"}},"state":"normal","creator_version":"v8.3.0","resolved_ts":444203257406423044,"checkpoint_ts":444203257406423044,"checkpoint_time":"2024-08-22 14:42:31.410"} ``` The meaning of each parameter is as follows: diff --git a/tidb-binlog/get-started-with-tidb-binlog.md b/tidb-binlog/get-started-with-tidb-binlog.md index 27cc03cae5c45..2a3c84faac804 100644 --- a/tidb-binlog/get-started-with-tidb-binlog.md +++ b/tidb-binlog/get-started-with-tidb-binlog.md @@ -43,7 +43,7 @@ sudo yum install -y mariadb-server ``` ```bash 
-curl -L https://download.pingcap.org/tidb-community-server-v8.2.0-linux-amd64.tar.gz | tar xzf -
+curl -L https://download.pingcap.org/tidb-community-server-v8.3.0-linux-amd64.tar.gz | tar xzf -
cd tidb-latest-linux-amd64
```

diff --git a/tidb-monitoring-api.md b/tidb-monitoring-api.md
index e60f49d14e8d7..886738c464586 100644
--- a/tidb-monitoring-api.md
+++ b/tidb-monitoring-api.md
@@ -28,7 +28,7 @@ The following example uses `http://${host}:${port}/status` to get the current st
curl http://127.0.0.1:10080/status
{
connections: 0, # The current number of clients connected to the TiDB server.
- version: "8.0.11-TiDB-v8.2.0", # The TiDB version number.
+ version: "8.0.11-TiDB-v8.3.0", # The TiDB version number.
git_hash: "778c3f4a5a716880bcd1d71b257c8165685f0d70" # The Git Hash of the current TiDB code.
}
```

diff --git a/tiflash/create-tiflash-replicas.md b/tiflash/create-tiflash-replicas.md
index 08132d05a2215..cc9dc6f59c931 100644
--- a/tiflash/create-tiflash-replicas.md
+++ b/tiflash/create-tiflash-replicas.md
@@ -147,10 +147,10 @@ Before TiFlash replicas are added, each TiKV instance performs a full table scan
tiup ctl:v<CLUSTER_VERSION> pd -u http://<PD_ADDRESS>:2379 store limit all engine tiflash 60 add-peer
```

- > In the preceding command, you need to replace `v<CLUSTER_VERSION>` with the actual cluster version, such as `v8.2.0` and `<PD_ADDRESS>:2379` with the address of any PD node. For example:
+ > In the preceding command, you need to replace `v<CLUSTER_VERSION>` with the actual cluster version, such as `v8.3.0` and `<PD_ADDRESS>:2379` with the address of any PD node. For example:
>
> ```shell
- > tiup ctl:v8.2.0 pd -u http://192.168.1.4:2379 store limit all engine tiflash 60 add-peer
+ > tiup ctl:v8.3.0 pd -u http://192.168.1.4:2379 store limit all engine tiflash 60 add-peer
> ```

Within a few minutes, you will observe a significant increase in CPU and disk IO resource usage of the TiFlash nodes, and TiFlash should create replicas faster. At the same time, the TiKV nodes' CPU and disk IO resource usage increases as well.

diff --git a/tiup/tiup-cluster.md b/tiup/tiup-cluster.md
index 3c68d87ea1c84..fae4e1bd2dcc7 100644
--- a/tiup/tiup-cluster.md
+++ b/tiup/tiup-cluster.md
@@ -62,7 +62,7 @@ To deploy the cluster, run the `tiup cluster deploy` command. The usage of the c
tiup cluster deploy <cluster-name> <version> <topology.yaml> [flags]
```

-This command requires you to provide the cluster name, the TiDB cluster version (such as `v8.2.0`), and a topology file of the cluster.
+This command requires you to provide the cluster name, the TiDB cluster version (such as `v8.3.0`), and a topology file of the cluster.

To write a topology file, refer to [the example](https://github.com/pingcap/tiup/blob/master/embed/examples/cluster/topology.example.yaml). The following file is an example of the simplest topology:

@@ -122,12 +122,12 @@ tidb_servers:
...
```

-Save the file as `/tmp/topology.yaml`. If you want to use TiDB v8.2.0 and your cluster name is `prod-cluster`, run the following command:
+Save the file as `/tmp/topology.yaml`.
If you want to use TiDB v8.3.0 and your cluster name is `prod-cluster`, run the following command: {{< copyable "shell-regular" >}} ```shell -tiup cluster deploy -p prod-cluster v8.2.0 /tmp/topology.yaml +tiup cluster deploy -p prod-cluster v8.3.0 /tmp/topology.yaml ``` During the execution, TiUP asks you to confirm your topology again and requires the root password of the target machine (the `-p` flag means inputting password): @@ -135,7 +135,7 @@ During the execution, TiUP asks you to confirm your topology again and requires ```bash Please confirm your topology: TiDB Cluster: prod-cluster -TiDB Version: v8.2.0 +TiDB Version: v8.3.0 Type Host Ports OS/Arch Directories ---- ---- ----- ------- ----------- pd 172.16.5.134 2379/2380 linux/x86_64 deploy/pd-2379,data/pd-2379 @@ -179,7 +179,7 @@ tiup cluster list Starting /root/.tiup/components/cluster/v1.12.3/cluster list Name User Version Path PrivateKey ---- ---- ------- ---- ---------- -prod-cluster tidb v8.2.0 /root/.tiup/storage/cluster/clusters/prod-cluster /root/.tiup/storage/cluster/clusters/prod-cluster/ssh/id_rsa +prod-cluster tidb v8.3.0 /root/.tiup/storage/cluster/clusters/prod-cluster /root/.tiup/storage/cluster/clusters/prod-cluster/ssh/id_rsa ``` ## Start the cluster @@ -209,7 +209,7 @@ tiup cluster display prod-cluster ``` Starting /root/.tiup/components/cluster/v1.12.3/cluster display prod-cluster TiDB Cluster: prod-cluster -TiDB Version: v8.2.0 +TiDB Version: v8.3.0 ID Role Host Ports OS/Arch Status Data Dir Deploy Dir -- ---- ---- ----- ------- ------ -------- ---------- 172.16.5.134:3000 grafana 172.16.5.134 3000 linux/x86_64 Up - deploy/grafana-3000 @@ -284,7 +284,7 @@ tiup cluster display prod-cluster ``` Starting /root/.tiup/components/cluster/v1.12.3/cluster display prod-cluster TiDB Cluster: prod-cluster -TiDB Version: v8.2.0 +TiDB Version: v8.3.0 ID Role Host Ports OS/Arch Status Data Dir Deploy Dir -- ---- ---- ----- ------- ------ -------- ---------- 172.16.5.134:3000 grafana 172.16.5.134 3000 linux/x86_64 Up - deploy/grafana-3000 @@ -396,12 +396,12 @@ Global Flags: -y, --yes Skip all confirmations and assumes 'yes' ``` -For example, the following command upgrades the cluster to v8.2.0: +For example, the following command upgrades the cluster to v8.3.0: {{< copyable "shell-regular" >}} ```bash -tiup cluster upgrade tidb-test v8.2.0 +tiup cluster upgrade tidb-test v8.3.0 ``` ## Update configuration @@ -586,11 +586,11 @@ tiup cluster audit Starting component `cluster`: /home/tidb/.tiup/components/cluster/v1.12.3/cluster audit ID Time Command -- ---- ------- -4BLhr0 2024-07-04T23:55:09+08:00 /home/tidb/.tiup/components/cluster/v1.12.3/cluster deploy test v8.2.0 /tmp/topology.yaml -4BKWjF 2024-07-04T23:36:57+08:00 /home/tidb/.tiup/components/cluster/v1.12.3/cluster deploy test v8.2.0 /tmp/topology.yaml -4BKVwH 2024-07-04T23:02:08+08:00 /home/tidb/.tiup/components/cluster/v1.12.3/cluster deploy test v8.2.0 /tmp/topology.yaml -4BKKH1 2024-07-04T16:39:04+08:00 /home/tidb/.tiup/components/cluster/v1.12.3/cluster destroy test -4BKKDx 2024-07-04T16:36:57+08:00 /home/tidb/.tiup/components/cluster/v1.12.3/cluster deploy test v8.2.0 /tmp/topology.yaml +4BLhr0 2024-08-22T23:55:09+08:00 /home/tidb/.tiup/components/cluster/v1.12.3/cluster deploy test v8.3.0 /tmp/topology.yaml +4BKWjF 2024-08-22T23:36:57+08:00 /home/tidb/.tiup/components/cluster/v1.12.3/cluster deploy test v8.3.0 /tmp/topology.yaml +4BKVwH 2024-08-22T23:02:08+08:00 /home/tidb/.tiup/components/cluster/v1.12.3/cluster deploy test v8.3.0 /tmp/topology.yaml +4BKKH1 
2024-08-22T16:39:04+08:00 /home/tidb/.tiup/components/cluster/v1.12.3/cluster destroy test
+4BKKDx 2024-08-22T16:36:57+08:00 /home/tidb/.tiup/components/cluster/v1.12.3/cluster deploy test v8.3.0 /tmp/topology.yaml
```

The first column is `audit-id`. To view the execution log of a certain command, pass the `audit-id` of a command as the flag as follows:

@@ -706,7 +706,7 @@ All operations above performed on the cluster machine use the SSH client embedde

Then you can use the `--ssh=system` command-line flag to enable the system-native command-line tool:

-- Deploy a cluster: `tiup cluster deploy <cluster-name> <version> <topology.yaml> --ssh=system`. Fill in the name of your cluster for `<cluster-name>`, the TiDB version to be deployed (such as `v8.2.0`) for `<version>`, and the topology file for `<topology.yaml>`.
+- Deploy a cluster: `tiup cluster deploy <cluster-name> <version> <topology.yaml> --ssh=system`. Fill in the name of your cluster for `<cluster-name>`, the TiDB version to be deployed (such as `v8.3.0`) for `<version>`, and the topology file for `<topology.yaml>`.
- Start a cluster: `tiup cluster start <cluster-name> --ssh=system`
- Upgrade a cluster: `tiup cluster upgrade ... --ssh=system`

diff --git a/tiup/tiup-component-cluster-deploy.md b/tiup/tiup-component-cluster-deploy.md
index 80c35b9a8351d..5ec0b0e2e2a0a 100644
--- a/tiup/tiup-component-cluster-deploy.md
+++ b/tiup/tiup-component-cluster-deploy.md
@@ -14,7 +14,7 @@ tiup cluster deploy <cluster-name> <version> <topology.yaml> [flags]
```

- `<cluster-name>`: the name of the new cluster, which cannot be the same as the existing cluster names.
-- `<version>`: the version number of the TiDB cluster to deploy, such as `v8.2.0`.
+- `<version>`: the version number of the TiDB cluster to deploy, such as `v8.3.0`.
- `<topology.yaml>`: the prepared [topology file](/tiup/tiup-cluster-topology-reference.md).

## Options

diff --git a/tiup/tiup-component-cluster-patch.md b/tiup/tiup-component-cluster-patch.md
index a2dfc811a6058..a1c3c99466247 100644
--- a/tiup/tiup-component-cluster-patch.md
+++ b/tiup/tiup-component-cluster-patch.md
@@ -29,7 +29,7 @@ Before running the `tiup cluster patch` command, you need to pack the binary pac

1. Determine the following variables:

    - `${component}`: the name of the component to be replaced (such as `tidb`, `tikv`, or `pd`).
-    - `${version}`: the version of the component (such as `v8.2.0` or `v7.5.2`).
+    - `${version}`: the version of the component (such as `v8.3.0` or `v7.5.3`).
    - `${os}`: the operating system (`linux`).
    - `${arch}`: the platform on which the component runs (`amd64`, `arm64`).

diff --git a/tiup/tiup-component-cluster-upgrade.md b/tiup/tiup-component-cluster-upgrade.md
index 261da16749a07..ffcab8d27ebff 100644
--- a/tiup/tiup-component-cluster-upgrade.md
+++ b/tiup/tiup-component-cluster-upgrade.md
@@ -14,7 +14,7 @@ tiup cluster upgrade <cluster-name> <version> [flags]
```

- `<cluster-name>`: the cluster name to operate on. If you forget the cluster name, you can check it with the [cluster list](/tiup/tiup-component-cluster-list.md) command.
-- `<version>`: the target version to upgrade to, such as `v8.2.0`. Currently, it is only allowed to upgrade to a version higher than the current cluster, that is, no downgrade is allowed. It is also not allowed to upgrade to the nightly version.
+- `<version>`: the target version to upgrade to, such as `v8.3.0`. Currently, it is only allowed to upgrade to a version higher than the current cluster, that is, no downgrade is allowed. It is also not allowed to upgrade to the nightly version.
## Options

diff --git a/tiup/tiup-component-dm-upgrade.md b/tiup/tiup-component-dm-upgrade.md
index 00e495e4e4bb1..44b695eaad13d 100644
--- a/tiup/tiup-component-dm-upgrade.md
+++ b/tiup/tiup-component-dm-upgrade.md
@@ -14,7 +14,7 @@ tiup dm upgrade <cluster-name> <version> [flags]
```

- `<cluster-name>` is the name of the cluster to be operated on. If you forget the cluster name, you can check it using the [`tiup dm list`](/tiup/tiup-component-dm-list.md) command.
-- `<version>` is the target version to be upgraded to, such as `v8.2.0`. Currently, only upgrading to a later version is allowed, and upgrading to an earlier version is not allowed, which means the downgrade is not allowed. Upgrading to a nightly version is not allowed either.
+- `<version>` is the target version to be upgraded to, such as `v8.3.0`. Currently, only upgrading to a later version is allowed, and upgrading to an earlier version is not allowed, which means the downgrade is not allowed. Upgrading to a nightly version is not allowed either.

## Options

diff --git a/tiup/tiup-component-management.md b/tiup/tiup-component-management.md
index 880d224f5a8d7..0cf5630d46b48 100644
--- a/tiup/tiup-component-management.md
+++ b/tiup/tiup-component-management.md
@@ -72,12 +72,12 @@ Example 2: Use TiUP to install the nightly version of TiDB.

tiup install tidb:nightly
```

-Example 3: Use TiUP to install TiKV v8.2.0.
+Example 3: Use TiUP to install TiKV v8.3.0.

{{< copyable "shell-regular" >}}

```shell
-tiup install tikv:v8.2.0
+tiup install tikv:v8.3.0
```

## Upgrade components

@@ -130,12 +130,12 @@ Before the component is started, TiUP creates a directory for it, and then puts

If you want to start the same component multiple times and reuse the previous working directory, you can use `--tag` to specify the same name when the component is started. After the tag is specified, the working directory will *not be automatically deleted* when the instance is terminated, which makes it convenient to reuse the working directory.

-Example 1: Operate TiDB v8.2.0.
+Example 1: Operate TiDB v8.3.0.

{{< copyable "shell-regular" >}}

```shell
-tiup tidb:v8.2.0
+tiup tidb:v8.3.0
```

Example 2: Specify the tag with which TiKV operates.

@@ -221,12 +221,12 @@ The following flags are supported in this command:

- If the version is ignored, adding `--all` means to uninstall all versions of this component.
- If the version and the component are both ignored, adding `--all` means to uninstall all components of all versions.

-Example 1: Uninstall TiDB v8.2.0.
+Example 1: Uninstall TiDB v8.3.0.

{{< copyable "shell-regular" >}}

```shell
-tiup uninstall tidb:v8.2.0
+tiup uninstall tidb:v8.3.0
```

Example 2: Uninstall TiKV of all versions.

diff --git a/tiup/tiup-mirror.md b/tiup/tiup-mirror.md
index 6a179d39c5d37..a291d9d3e01d1 100644
--- a/tiup/tiup-mirror.md
+++ b/tiup/tiup-mirror.md
@@ -87,9 +87,9 @@ The `tiup mirror clone` command provides many optional flags (might provide more

If you want to clone only one version (not all versions) of a component, use `--<component>=<version>` to specify this version. For example:

- - Execute the `tiup mirror clone --tidb v8.2.0` command to clone the v8.2.0 version of the TiDB component.
- - Run the `tiup mirror clone --tidb v8.2.0 --tikv all` command to clone the v8.2.0 version of the TiDB component and all versions of the TiKV component.
- - Run the `tiup mirror clone v8.2.0` command to clone the v8.2.0 version of all components in a cluster.
+ - Execute the `tiup mirror clone --tidb v8.3.0` command to clone the v8.3.0 version of the TiDB component.
+ - Run the `tiup mirror clone --tidb v8.3.0 --tikv all` command to clone the v8.3.0 version of the TiDB component and all versions of the TiKV component. + - Run the `tiup mirror clone v8.3.0` command to clone the v8.3.0 version of all components in a cluster. After cloning, signing keys are set up automatically. diff --git a/tiup/tiup-playground.md b/tiup/tiup-playground.md index 1925891c3865c..b88596275e21a 100644 --- a/tiup/tiup-playground.md +++ b/tiup/tiup-playground.md @@ -22,7 +22,7 @@ This command actually performs the following operations: - Because this command does not specify the version of the playground component, TiUP first checks the latest version of the installed playground component. Assume that the latest version is v1.12.3, then this command works the same as `tiup playground:v1.12.3`. - If you have not used TiUP playground to install the TiDB, TiKV, and PD components, the playground component installs the latest stable version of these components, and then start these instances. -- Because this command does not specify the version of the TiDB, PD, and TiKV component, TiUP playground uses the latest version of each component by default. Assume that the latest version is v8.2.0, then this command works the same as `tiup playground:v1.12.3 v8.2.0`. +- Because this command does not specify the version of the TiDB, PD, and TiKV component, TiUP playground uses the latest version of each component by default. Assume that the latest version is v8.3.0, then this command works the same as `tiup playground:v1.12.3 v8.3.0`. - Because this command does not specify the number of each component, TiUP playground, by default, starts a smallest cluster that consists of one TiDB instance, one TiKV instance, one PD instance, and one TiFlash instance. - After starting each TiDB component, TiUP playground reminds you that the cluster is successfully started and provides you some useful information, such as how to connect to the TiDB cluster through the MySQL client and how to access the [TiDB Dashboard](/dashboard/dashboard-intro.md). @@ -187,7 +187,7 @@ tiup playground scale-in --pid 86526 Starting from v8.2.0, [PD microservice mode](/pd-microservices.md) (experimental) can be deployed using TiUP. You can deploy the `tso` microservice and `scheduling` microservice for your cluster using TiUP Playground as follows: ```shell -tiup playground v8.2.0 --pd.mode ms --pd 3 --tso 2 --scheduling 2 +tiup playground v8.3.0 --pd.mode ms --pd 3 --tso 2 --scheduling 2 ``` - `--pd.mode`: setting it to `ms` means enabling the microservice mode for PD. diff --git a/upgrade-monitoring-services.md b/upgrade-monitoring-services.md index fcb792cbef2ce..5df8aafe07962 100644 --- a/upgrade-monitoring-services.md +++ b/upgrade-monitoring-services.md @@ -36,7 +36,7 @@ Download a new installation package from the [Prometheus download page](https:// > **Tip:** > - > `{version}` in the link indicates the version number of TiDB and `{arch}` indicates the architecture of the system, which can be `amd64` or `arm64`. For example, the download link for `v8.2.0` in the `amd64` architecture is `https://download.pingcap.org/tidb-community-toolkit-v8.2.0-linux-amd64.tar.gz`. + > `{version}` in the link indicates the version number of TiDB and `{arch}` indicates the architecture of the system, which can be `amd64` or `arm64`. For example, the download link for `v8.3.0` in the `amd64` architecture is `https://download.pingcap.org/tidb-community-toolkit-v8.3.0-linux-amd64.tar.gz`. 2. 
In the extracted files, locate `prometheus-v{version}-linux-amd64.tar.gz` and extract it. @@ -85,7 +85,7 @@ In the following upgrade steps, you need to download the Grafana installation pa > **Tip:** > - > `{version}` in the link indicates the version number of TiDB and `{arch}` indicates the architecture of the system, which can be `amd64` or `arm64`. For example, the download link for `v8.2.0` in the `amd64` architecture is `https://download.pingcap.org/tidb-community-toolkit-v8.2.0-linux-amd64.tar.gz`. + > `{version}` in the link indicates the version number of TiDB and `{arch}` indicates the architecture of the system, which can be `amd64` or `arm64`. For example, the download link for `v8.3.0` in the `amd64` architecture is `https://download.pingcap.org/tidb-community-toolkit-v8.3.0-linux-amd64.tar.gz`. 2. In the extracted files, locate `grafana-v{version}-linux-amd64.tar.gz` and extract it. diff --git a/upgrade-tidb-using-tiup.md b/upgrade-tidb-using-tiup.md index 19c7a4f930ad5..0974edaf4d5df 100644 --- a/upgrade-tidb-using-tiup.md +++ b/upgrade-tidb-using-tiup.md @@ -8,11 +8,11 @@ aliases: ['/docs/dev/upgrade-tidb-using-tiup/','/docs/dev/how-to/upgrade/using-t This document is targeted for the following upgrade paths: -- Upgrade from TiDB 4.0 versions to TiDB 8.2. -- Upgrade from TiDB 5.0-5.4 versions to TiDB 8.2. -- Upgrade from TiDB 6.0-6.6 to TiDB 8.2. -- Upgrade from TiDB 7.0-7.6 to TiDB 8.2. -- Upgrade from TiDB 8.0-8.1 to TiDB 8.2. +- Upgrade from TiDB 4.0 versions to TiDB 8.3. +- Upgrade from TiDB 5.0-5.4 versions to TiDB 8.3. +- Upgrade from TiDB 6.0-6.6 to TiDB 8.3. +- Upgrade from TiDB 7.0-7.6 to TiDB 8.3. +- Upgrade from TiDB 8.0-8.2 to TiDB 8.3. > **Warning:** > @@ -25,7 +25,7 @@ This document is targeted for the following upgrade paths: > **Note:** > -> - If your cluster to be upgraded is v3.1 or an earlier version (v3.0 or v2.1), the direct upgrade to v8.2.0 is not supported. You need to upgrade your cluster first to v4.0 and then to v8.2.0. +> - If your cluster to be upgraded is v3.1 or an earlier version (v3.0 or v2.1), the direct upgrade to v8.3.0 is not supported. You need to upgrade your cluster first to v4.0 and then to v8.3.0. > - If your cluster to be upgraded is earlier than v6.2, the upgrade might get stuck when you upgrade the cluster to v6.2 or later versions in some scenarios. You can refer to [How to fix the issue](#how-to-fix-the-issue-that-the-upgrade-gets-stuck-when-upgrading-to-v620-or-later-versions). > - TiDB nodes use the value of the [`server-version`](/tidb-configuration-file.md#server-version) configuration item to verify the current TiDB version. Therefore, to avoid unexpected behaviors, before upgrading the TiDB cluster, you need to set the value of `server-version` to empty or the real version of the current TiDB cluster. > - Setting the [`performance.force-init-stats`](/tidb-configuration-file.md#force-init-stats-new-in-v657-and-v710) configuration item to `ON` prolongs the TiDB startup time, which might cause startup timeouts and upgrade failures. To avoid this issue, it is recommended to set a longer waiting timeout for TiUP. @@ -57,12 +57,12 @@ This document is targeted for the following upgrade paths: ## Upgrade caveat - TiDB currently does not support version downgrade or rolling back to an earlier version after the upgrade. 
-- For the v4.0 cluster managed using TiDB Ansible, you need to import the cluster to TiUP (`tiup cluster`) for new management according to [Upgrade TiDB Using TiUP (v4.0)](https://docs.pingcap.com/tidb/v4.0/upgrade-tidb-using-tiup#import-tidb-ansible-and-the-inventoryini-configuration-to-tiup). Then you can upgrade the cluster to v8.2.0 according to this document. -- To update versions earlier than v3.0 to v8.2.0: +- For the v4.0 cluster managed using TiDB Ansible, you need to import the cluster to TiUP (`tiup cluster`) for new management according to [Upgrade TiDB Using TiUP (v4.0)](https://docs.pingcap.com/tidb/v4.0/upgrade-tidb-using-tiup#import-tidb-ansible-and-the-inventoryini-configuration-to-tiup). Then you can upgrade the cluster to v8.3.0 according to this document. +- To update versions earlier than v3.0 to v8.3.0: 1. Update this version to 3.0 using [TiDB Ansible](https://docs.pingcap.com/tidb/v3.0/upgrade-tidb-using-ansible). 2. Use TiUP (`tiup cluster`) to import the TiDB Ansible configuration. 3. Update the 3.0 version to 4.0 according to [Upgrade TiDB Using TiUP (v4.0)](https://docs.pingcap.com/tidb/v4.0/upgrade-tidb-using-tiup#import-tidb-ansible-and-the-inventoryini-configuration-to-tiup). - 4. Upgrade the cluster to v8.2.0 according to this document. + 4. Upgrade the cluster to v8.3.0 according to this document. - Support upgrading the versions of TiDB Binlog (deprecated), TiCDC, TiFlash, and other components. - When upgrading TiFlash from versions earlier than v6.3.0 to v6.3.0 and later versions, note that the CPU must support the AVX2 instruction set under the Linux AMD64 architecture and the ARMv8 instruction set architecture under the Linux ARM64 architecture. For details, see the description in [v6.3.0 Release Notes](/releases/release-6.3.0.md#others). - For detailed compatibility changes of different versions, see the [Release Notes](/releases/release-notes.md) of each version. Modify your cluster configuration according to the "Compatibility Changes" section of the corresponding release notes. @@ -74,7 +74,7 @@ This section introduces the preparation works needed before upgrading your TiDB ### Step 1: Review compatibility changes -Review [the compatibility changes](/releases/release-8.2.0.md#compatibility-changes) in TiDB v8.2.0 release notes. If any changes affect your upgrade, take actions accordingly. +Review [the compatibility changes](/releases/release-8.3.0.md#compatibility-changes) in TiDB v8.3.0 release notes. If any changes affect your upgrade, take actions accordingly. ### Step 2: Upgrade TiUP or TiUP offline mirror @@ -149,7 +149,7 @@ Now, the offline mirror has been upgraded successfully. If an error occurs durin > Skip this step if one of the following situations applies: > > + You have not modified the configuration parameters of the original cluster. Or you have modified the configuration parameters using `tiup cluster` but no more modification is needed. -> + After the upgrade, you want to use v8.2.0's default parameter values for the unmodified configuration items. +> + After the upgrade, you want to use v8.3.0's default parameter values for the unmodified configuration items. 1. Enter the `vi` editing mode to edit the topology file: @@ -165,7 +165,7 @@ Now, the offline mirror has been upgraded successfully. If an error occurs durin > **Note:** > -> Before you upgrade the cluster to v6.6.0, make sure that the parameters you have modified in v4.0 are compatible in v8.2.0. For details, see [TiKV Configuration File](/tikv-configuration-file.md). 
+> Before you upgrade the cluster to v6.6.0, make sure that the parameters you have modified in v4.0 are compatible in v8.3.0. For details, see [TiKV Configuration File](/tikv-configuration-file.md).

### Step 4: Check the DDL and backup status of the cluster

@@ -213,12 +213,12 @@ If your application has a maintenance window for the database to be stopped for
tiup cluster upgrade <cluster-name> <version>
```

-For example, if you want to upgrade the cluster to v8.2.0:
+For example, if you want to upgrade the cluster to v8.3.0:

{{< copyable "shell-regular" >}}

```shell
-tiup cluster upgrade <cluster-name> v8.2.0
+tiup cluster upgrade <cluster-name> v8.3.0
```

> **Note:**

@@ -266,7 +266,7 @@ tiup cluster upgrade -h | grep "version"

tiup cluster stop <cluster-name>
```

-2. Use the `upgrade` command with the `--offline` option to perform the offline upgrade. Fill in the name of your cluster for `<cluster-name>` and the version to upgrade to for `<version>`, such as `v8.2.0`.
+2. Use the `upgrade` command with the `--offline` option to perform the offline upgrade. Fill in the name of your cluster for `<cluster-name>` and the version to upgrade to for `<version>`, such as `v8.3.0`.

{{< copyable "shell-regular" >}}

@@ -295,7 +295,7 @@ tiup cluster display <cluster-name>
```
Cluster type: tidb
Cluster name: <cluster-name>
-Cluster version: v8.2.0
+Cluster version: v8.3.0
```

## FAQ

@@ -350,7 +350,7 @@ Starting from v6.2.0, TiDB enables the [concurrent DDL framework](/ddl-introduct

### The evict leader has waited too long during the upgrade. How to skip this step for a quick upgrade?

-You can specify `--force`. Then the processes of transferring PD leader and evicting TiKV leader are skipped during the upgrade. The cluster is directly restarted to update the version, which has a great impact on the cluster that runs online. In the following command, `<version>` is the version to upgrade to, such as `v8.2.0`.
+You can specify `--force`. Then the processes of transferring PD leader and evicting TiKV leader are skipped during the upgrade. The cluster is directly restarted to update the version, which has a great impact on the cluster that runs online. In the following command, `<version>` is the version to upgrade to, such as `v8.3.0`.

{{< copyable "shell-regular" >}}

@@ -365,5 +365,5 @@ You can upgrade the tool version by using TiUP to install the `ctl` component of

{{< copyable "shell-regular" >}}

```shell
-tiup install ctl:v8.2.0
+tiup install ctl:v8.3.0
```

From 31f8e8649ba05f393646ce990f38ef851917c01d Mon Sep 17 00:00:00 2001
From: Grace Cai
Date: Thu, 22 Aug 2024 11:31:43 +0800
Subject: [PATCH 43/44] PD: Add the name field to the startup parameters (#18638)

---
command-line-flags-for-scheduling-configuration.md | 6 ++++++
command-line-flags-for-tso-configuration.md | 6 ++++++
2 files changed, 12 insertions(+)

diff --git a/command-line-flags-for-scheduling-configuration.md b/command-line-flags-for-scheduling-configuration.md
index dd660cb0d86aa..9c9c4cd7acd5f 100644
--- a/command-line-flags-for-scheduling-configuration.md
+++ b/command-line-flags-for-scheduling-configuration.md
@@ -57,6 +57,12 @@ The Scheduling node is used for providing the `scheduling` microservice for PD.
- Default: `""`
- If this flag is not set, logs are output to "stderr". If this flag is set, logs are output to the corresponding file.

+## `--name` New in v8.3.0
+
++ The name of the current Scheduling node.
++ Default: `"scheduling-${hostname}"`
++ If you need to start multiple Scheduling nodes, it is recommended to configure different names for different nodes for easier identification.
+
## `-L`

- The log level.
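For illustration only, here is a minimal sketch of how the new `--name` flag could give two Scheduling nodes distinct names. The `pd-server services scheduling` entry point, the addresses, and the node names below are assumptions made for this example rather than commands taken from the patch; `--backend-endpoints` and `--listen-addr` are existing Scheduling node flags described in the same configuration document.

```shell
# Assumed invocation; verify the entry point against your deployment method.
# Start two Scheduling nodes against the same PD backend, each with its own name.
pd-server services scheduling \
    --name="scheduling-1" \
    --backend-endpoints="http://10.0.1.10:2379" \
    --listen-addr="http://10.0.1.21:3379"

pd-server services scheduling \
    --name="scheduling-2" \
    --backend-endpoints="http://10.0.1.10:2379" \
    --listen-addr="http://10.0.1.22:3379"
```

Giving each node a distinct `--name` makes the instances easy to tell apart in logs and monitoring, which is the motivation stated in the patched text.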
diff --git a/command-line-flags-for-tso-configuration.md b/command-line-flags-for-tso-configuration.md index dbc3aedca0ef6..1c61dfa5e8227 100644 --- a/command-line-flags-for-tso-configuration.md +++ b/command-line-flags-for-tso-configuration.md @@ -57,6 +57,12 @@ The TSO node is used for providing the `tso` microservice for PD. You can config - Default: `""` - If this flag is not set, logs are output to "stderr". If this flag is set, logs are output to the corresponding file. +## `--name` New in v8.3.0 + ++ The name of the current TSO node. ++ Default: `"tso-${hostname}"` ++ If you need to start multiple TSO nodes, it is recommended to configure different names for different nodes for easier identification. + ## `-L` - The log level. From d4be1bd5437d600ef1d07f3e28d53aa856a99a6d Mon Sep 17 00:00:00 2001 From: Aolin Date: Thu, 22 Aug 2024 16:52:43 +0800 Subject: [PATCH 44/44] pull_request_template: add v8.4 (#18671) --- .github/pull_request_template.md | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 473c2344878c9..0478a616ac2d2 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -24,6 +24,7 @@ By default, **CHOOSE MASTER ONLY** so your changes will be applied to the next T For details, see [tips for choosing the affected versions](https://github.com/pingcap/docs/blob/master/CONTRIBUTING.md#guideline-for-choosing-the-affected-versions). - [ ] master (the latest development version) +- [ ] v8.4 (TiDB 8.4 versions) - [ ] v8.3 (TiDB 8.3 versions) - [ ] v8.2 (TiDB 8.2 versions) - [ ] v8.1 (TiDB 8.1 versions)