From 73b4a71c81a5df18a0712dac3ca5411a5f093cf3 Mon Sep 17 00:00:00 2001 From: Song Gao <2695690803@qq.com> Date: Wed, 27 May 2020 14:03:19 +0800 Subject: [PATCH 1/4] Create config.config --- cc/config.config | 423 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 423 insertions(+) create mode 100644 cc/config.config diff --git a/cc/config.config b/cc/config.config new file mode 100644 index 0000000000..917e3e5e28 --- /dev/null +++ b/cc/config.config @@ -0,0 +1,423 @@ +# TiDB Configuration. + +# TiDB server host. +host = "0.0.0.0" + +# tidb server advertise IP. +advertise-address = "" + +# TiDB server port. +port = 4000 + +# Registered store name, [tikv, mocktikv] +store = "mocktikv" + +# TiDB storage path. +path = "/tmp/tidb" + +# The socket file to use for connection. +socket = "" + +# Run ddl worker on this tidb-server. +run-ddl = true + +# Schema lease duration, very dangerous to change only if you know what you do. +lease = "45s" + +# When create table, split a separated region for it. It is recommended to +# turn off this option if there will be a large number of tables created. +split-table = true + +# The limit of concurrent executed sessions. +token-limit = 1000 + +# The maximum memory available for a single SQL statement. Default: 1GB +mem-quota-query = 1073741824 + +# Controls whether to enable the temporary storage for some operators when a single SQL statement exceeds the memory quota specified by mem-quota-query. +oom-use-tmp-storage = true + +# Specifies the temporary storage path for some operators when a single SQL statement exceeds the memory quota specified by mem-quota-query. +# It defaults to a generated directory in `/_tidb/` if it is unset. +# It only takes effect when `oom-use-tmp-storage` is `true`. +# tmp-storage-path = "/tmp/_tidb/MC4wLjAuMDo0MDAwLzAuMC4wLjA6MTAwODA=/tmp-storage" + +# Specifies the maximum use of temporary storage (bytes) for all active queries when `oom-use-tmp-storage` is enabled. +# If the `tmp-storage-quota` exceeds the capacity of the temporary storage directory, tidb-server would return an error and exit. +# The default value of tmp-storage-quota is under 0 which means tidb-server wouldn't check the capacity. +tmp-storage-quota = -1 + +# Specifies what operation TiDB performs when a single SQL statement exceeds the memory quota specified by mem-quota-query and cannot be spilled over to disk. +# Valid options: ["log", "cancel"] +oom-action = "cancel" + +# Enable coprocessor streaming. +enable-streaming = false + +# Enable batch commit for the DMLs. +enable-batch-dml = false + +# Set system variable 'lower_case_table_names' +lower-case-table-names = 2 + +# Make "kill query" behavior compatible with MySQL. It's not recommend to +# turn on this option when TiDB server is behind a proxy. +compatible-kill-query = false + +# check mb4 value in utf8 is used to control whether to check the mb4 characters when the charset is utf8. +check-mb4-value-in-utf8 = true + +# treat-old-version-utf8-as-utf8mb4 use for upgrade compatibility. Set to true will treat old version table/column UTF8 charset as UTF8MB4. +treat-old-version-utf8-as-utf8mb4 = true + +# max-index-length is used to deal with compatibility issues from v3.0.7 and previous version upgrades. It can only be in [3072, 3072*4]. +max-index-length = 3072 + +# enable-table-lock is used to control table lock feature. Default is false, indicate the table lock feature is disabled. 
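+# Illustrative note (hedged, not part of the default template): with enable-table-lock = true,
+# explicit locks taken with statements such as `LOCK TABLES t WRITE;` and released with
+# `UNLOCK TABLES;` are expected to be enforced; with the default of false these statements are
+# accepted for compatibility but take no lock.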
+enable-table-lock = false + +# delay-clean-table-lock is used to control the time (Milliseconds) of delay before unlock the table in the abnormal situation. +delay-clean-table-lock = 0 + +# Maximum number of the splitting region, which is used by the split region statement. +split-region-max-num = 1000 + +# alter-primary-key is used to control alter primary key feature. Default is false, indicate the alter primary key feature is disabled. +# If it is true, we can add the primary key by "alter table". However, if a table already exists before the switch is turned true and the data type of its primary key column is an integer, +# the primary key cannot be dropped. +alter-primary-key = false + +# server-version is used to change the version string of TiDB in the following scenarios: +# 1. the server version returned by builtin-function `VERSION()`. +# 2. the server version filled in handshake packets of MySQL Connection Protocol, see https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake for more details. +# if server-version = "", the default value(original TiDB version string) is used. +server-version = "" + +# repair mode is used to repair the broken table meta in TiKV in extreme cases. +repair-mode = false + +# Repair table list is used to list the tables in repair mode with the format like ["db.table",]. +# In repair mode, repairing table which is not in repair list will get wrong database or wrong table error. +repair-table-list = [] + +# The maximum permitted number of simultaneous client connections. When the value is 0, the number of connections is unlimited. +max-server-connections = 0 + +# Whether new collations are enabled, as indicated by its name, this configuration entry take effect ONLY when a TiDB cluster bootstraps for the first time. +new_collations_enabled_on_first_bootstrap = false + +[log] +# Log level: debug, info, warn, error, fatal. +level = "info" + +# Log format, one of json, text, console. +format = "text" + +# Enable automatic timestamps in log output, if not set, it will be defaulted to true. +# enable-timestamp = true + +# Enable annotating logs with the full stack error message, if not set, it will be defaulted to false. +# enable-error-stack = false + +# Whether to enable slow query log. +enable-slow-log = true + +# Stores slow query log into separated files. +slow-query-file = "tidb-slow.log" + +# Queries with execution time greater than this value will be logged. (Milliseconds) +slow-threshold = 300 + +# record-plan-in-slow-log is used to enable record query plan in slow log. +# 0 is disable. 1 is enable. +record-plan-in-slow-log = 1 + +# Queries with internal result greater than this value will be logged. +expensive-threshold = 10000 + +# Maximum query length recorded in log. +query-log-max-len = 4096 + +# File logging. +[log.file] +# Log file name. +filename = "" + +# Max log file size in MB (upper limit to 4096MB). +max-size = 300 + +# Max log file keep days. No clean up by default. +max-days = 0 + +# Maximum number of old log files to retain. No clean up by default. +max-backups = 0 + +[security] +# Path of file that contains list of trusted SSL CAs for connection with mysql client. +ssl-ca = "" + +# Path of file that contains X509 certificate in PEM format for connection with mysql client. +ssl-cert = "" + +# Path of file that contains X509 key in PEM format for connection with mysql client. +ssl-key = "" + +# Path of file that contains list of trusted SSL CAs for connection with cluster components. 
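+# Illustrative example (placeholder paths, not defaults): to secure traffic between cluster
+# components, point all three cluster-ssl-* options below at your own CA and per-component
+# certificates, e.g. cluster-ssl-ca = "/path/to/cluster-ca.pem",
+# cluster-ssl-cert = "/path/to/tidb-server-cert.pem", cluster-ssl-key = "/path/to/tidb-server-key.pem".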
+cluster-ssl-ca = "" + +# Path of file that contains X509 certificate in PEM format for connection with cluster components. +cluster-ssl-cert = "" + +# Path of file that contains X509 key in PEM format for connection with cluster components. +cluster-ssl-key = "" + +[status] +# If enable status report HTTP service. +report-status = true + +# TiDB status host. +status-host = "0.0.0.0" + +## status-host is the HTTP address for reporting the internal status of a TiDB server, for example: +## API for prometheus: http://${status-host}:${status_port}/metrics +## API for pprof: http://${status-host}:${status_port}/debug/pprof +# TiDB status port. +status-port = 10080 + +# Prometheus pushgateway address, leaves it empty will disable push to pushgateway. +metrics-addr = "" + +# Prometheus client push interval in second, set \"0\" to disable push to pushgateway. +metrics-interval = 15 + +# Record statements qps by database name if it is enabled. +record-db-qps = false + +[performance] +# Max CPUs to use, 0 use number of CPUs in the machine. +max-procs = 0 + +# Max memory size to use, 0 use the total usable memory in the machine. +max-memory = 0 + +# StmtCountLimit limits the max count of statement inside a transaction. +stmt-count-limit = 5000 + +# Set keep alive option for tcp connection. +tcp-keep-alive = true + +# Whether support cartesian product. +cross-join = true + +# Stats lease duration, which influences the time of analyze and stats load. +stats-lease = "3s" + +# Run auto analyze worker on this tidb-server. +run-auto-analyze = true + +# Probability to use the query feedback to update stats, 0.0 or 1.0 for always false/true. +feedback-probability = 0.05 + +# The max number of query feedback that cache in memory. +query-feedback-limit = 1024 + +# Pseudo stats will be used if the ratio between the modify count and +# row count in statistics of a table is greater than it. +pseudo-estimate-ratio = 0.8 + +# Force the priority of all statements in a specified priority. +# The value could be "NO_PRIORITY", "LOW_PRIORITY", "HIGH_PRIORITY" or "DELAYED". +force-priority = "NO_PRIORITY" + +# Bind info lease duration, which influences the duration of loading bind info and handling invalid bind. +bind-info-lease = "3s" + +# Whether support pushing down aggregation with distinct to cop task +distinct-agg-push-down = false + +# The limitation of the size in byte for the entries in one transaction. +# If using TiKV as the storage, the entry represents a key/value pair. +# NOTE: If binlog is enabled with Kafka (e.g. arbiter cluster), +# this value should be less than 1073741824(1G) because this is the maximum size that can be handled by Kafka. +# If binlog is disabled or binlog is enabled without Kafka, this value should be less than 10737418240(10G). +txn-total-size-limit = 104857600 + +# The max number of running concurrency two phase committer request for an SQL. +committer-concurrency = 16 + +# max lifetime of transaction ttl manager. +max-txn-ttl = 600000 + +[proxy-protocol] +# PROXY protocol acceptable client networks. +# Empty string means disable PROXY protocol, * means all networks. +networks = "" + +# PROXY protocol header read timeout, unit is second +header-timeout = 5 + +[prepared-plan-cache] +enabled = false +capacity = 100 +memory-guard-ratio = 0.1 + +[opentracing] +# Enable opentracing. +enable = false + +# Whether to enable the rpc metrics. 
+rpc-metrics = false + +[opentracing.sampler] +# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote +type = "const" + +# Param is a value passed to the sampler. +# Valid values for Param field are: +# - for "const" sampler, 0 or 1 for always false/true respectively +# - for "probabilistic" sampler, a probability between 0 and 1 +# - for "rateLimiting" sampler, the number of spans per second +# - for "remote" sampler, param is the same as for "probabilistic" +# and indicates the initial sampling rate before the actual one +# is received from the mothership +param = 1.0 + +# SamplingServerURL is the address of jaeger-agent's HTTP sampling server +sampling-server-url = "" + +# MaxOperations is the maximum number of operations that the sampler +# will keep track of. If an operation is not tracked, a default probabilistic +# sampler will be used rather than the per operation specific sampler. +max-operations = 0 + +# SamplingRefreshInterval controls how often the remotely controlled sampler will poll +# jaeger-agent for the appropriate sampling strategy. +sampling-refresh-interval = 0 + +[opentracing.reporter] +# QueueSize controls how many spans the reporter can keep in memory before it starts dropping +# new spans. The queue is continuously drained by a background go-routine, as fast as spans +# can be sent out of process. +queue-size = 0 + +# BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full. +# It is generally not useful, as it only matters for very low traffic services. +buffer-flush-interval = 0 + +# LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter +# and logs all submitted spans. Main Configuration.Logger must be initialized in the code +# for this option to have any effect. +log-spans = false + +# LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address +local-agent-host-port = "" + +[tikv-client] +# Max gRPC connections that will be established with each tikv-server. +grpc-connection-count = 4 + +# After a duration of this time in seconds if the client doesn't see any activity it pings +# the server to see if the transport is still alive. +grpc-keepalive-time = 10 + +# After having pinged for keepalive check, the client waits for a duration of Timeout in seconds +# and if no activity is seen even after that the connection is closed. +grpc-keepalive-timeout = 3 + +# Max time for commit command, must be twice bigger than raft election timeout. +commit-timeout = "41s" + +# Max batch size in gRPC. +max-batch-size = 128 +# Overload threshold of TiKV. +overload-threshold = 200 +# Max batch wait time in nanosecond to avoid waiting too long. 0 means disable this feature. +max-batch-wait-time = 0 +# Batch wait size, to avoid waiting too long. +batch-wait-size = 8 + +# Enable chunk encoded data for coprocessor requests. +enable-chunk-rpc = true + +# If a Region has not been accessed for more than the given duration (in seconds), it +# will be reloaded from the PD. +region-cache-ttl = 600 + +# store-limit is used to restrain TiDB from sending request to some stores which is up to the limit. +# If a store has been up to the limit, it will return error for the successive request in same store. +# default 0 means shutting off store limit. +store-limit = 0 + +# store-liveness-timeout is used to control timeout for store liveness after sending request failed. +store-liveness-timeout = "120s" + +[tikv-client.copr-cache] +# Whether to enable the copr cache. 
The copr cache saves the result from TiKV Coprocessor in the memory and +# reuses the result when corresponding data in TiKV is unchanged, on a region basis. +enabled = true + +# The capacity in MB of the cache. +capacity-mb = 1000.0 + +# Only cache requests whose result set is small. +admission-max-result-mb = 10.0 +# Only cache requests takes notable time to process. +admission-min-process-ms = 5 + +[binlog] +# enable to write binlog. +# NOTE: If binlog is enabled with Kafka (e.g. arbiter cluster), +# txn-total-size-limit should be less than 1073741824(1G) because this is the maximum size that can be handled by Kafka. +enable = false + +# WriteTimeout specifies how long it will wait for writing binlog to pump. +write-timeout = "15s" + +# If IgnoreError is true, when writing binlog meets error, TiDB would stop writing binlog, +# but still provide service. +ignore-error = false + +# use socket file to write binlog, for compatible with kafka version tidb-binlog. +binlog-socket = "" + +# the strategy for sending binlog to pump, value can be "range" or "hash" now. +strategy = "range" + +[pessimistic-txn] +# enable pessimistic transaction. +enable = true + +# max retry count for a statement in a pessimistic transaction. +max-retry-count = 256 + +[stmt-summary] +# enable statement summary. +enable = true + +# enable statement summary for TiDB internal query, default is false. +enable-internal-query = false + +# max number of statements kept in memory. +max-stmt-count = 200 + +# max length of displayed normalized sql and sample sql. +max-sql-length = 4096 + +# the refresh interval of statement summary, it's counted in seconds. +refresh-interval = 1800 + +# the maximum history size of statement summary. +history-size = 24 + +# experimental section controls the features that are still experimental: their semantics, +# interfaces are subject to change, using these features in the production environment is not recommended. +[experimental] +# enable column attribute `auto_random` to be defined on the primary key column. +allow-auto-random = false +# enable creating expression index. +allow-expression-index = false + +# server level isolation read by engines and labels +[isolation-read] +# engines means allow the tidb server read data from which types of engines. options: "tikv", "tiflash", "tidb". +engines = ["tikv", "tiflash", "tidb"] \ No newline at end of file From 1cfc1d22b5e9673826245914e75b060ea71a2920 Mon Sep 17 00:00:00 2001 From: Song Gao <2695690803@qq.com> Date: Wed, 27 May 2020 14:04:45 +0800 Subject: [PATCH 2/4] Update config.config --- cc/config.config | 1235 +++++++++++++++++++++++++++++++--------------- 1 file changed, 841 insertions(+), 394 deletions(-) diff --git a/cc/config.config b/cc/config.config index 917e3e5e28..80292805ba 100644 --- a/cc/config.config +++ b/cc/config.config @@ -1,423 +1,870 @@ -# TiDB Configuration. +## TiKV config template +## Human-readable big numbers: +## File size(based on byte): KB, MB, GB, TB, PB +## e.g.: 1_048_576 = "1MB" +## Time(based on ms): ms, s, m, h +## e.g.: 78_000 = "1.3m" -# TiDB server host. -host = "0.0.0.0" +## Log levels: trace, debug, info, warning, error, critical. +## Note that `debug` and `trace` are only available in development builds. +# log-level = "info" -# tidb server advertise IP. -advertise-address = "" +## File to store logs. +## If it is not set, logs will be appended to stderr. +# log-file = "" -# TiDB server port. -port = 4000 +## File to store slow logs. 
+## If "log-file" is set, but this is not set, the slow logs will be appeneded +## to "log-file". If both "log-file" and "slow-log-file" are not set, all logs +## will be appended to stderr. +# slow-log-file = "" -# Registered store name, [tikv, mocktikv] -store = "mocktikv" +## The minimum operation cost to output relative logs. +# slow-log-threshold = "1s" -# TiDB storage path. -path = "/tmp/tidb" +## Timespan between rotating the log files. +## Once this timespan passes, log files will be rotated, i.e. existing log file will have a +## timestamp appended to its name and a new file will be created. +# log-rotation-timespan = "24h" -# The socket file to use for connection. -socket = "" +## Size of log file that triggers the log rotation. +## Once the size of log file exceeds the threshold value, the log file will be rotated +## and place the old log file in a new file named by orginal file name subbfixed by a timestamp. +# log-rotation-size = "300MB" -# Run ddl worker on this tidb-server. -run-ddl = true +# Configurations for the single thread pool serving read requests. +[readpool.unified] +## The minimal working thread count of the thread pool. +# min-thread-count = 1 -# Schema lease duration, very dangerous to change only if you know what you do. -lease = "45s" +## The maximum working thread count of the thread pool. +## The default value is max(4, LOGICAL_CPU_NUM * 0.8). +# max-thread-count = 8 -# When create table, split a separated region for it. It is recommended to -# turn off this option if there will be a large number of tables created. -split-table = true +## Size of the stack for each thread in the thread pool. +# stack-size = "10MB" -# The limit of concurrent executed sessions. -token-limit = 1000 +## Max running tasks of each worker, reject if exceeded. +# max-tasks-per-worker = 2000 -# The maximum memory available for a single SQL statement. Default: 1GB -mem-quota-query = 1073741824 +[readpool.storage] +## Whether to use the unified read pool to handle storage requests. +# use-unified-pool = false -# Controls whether to enable the temporary storage for some operators when a single SQL statement exceeds the memory quota specified by mem-quota-query. -oom-use-tmp-storage = true +## The following configurations only take effect when `use-unified-pool` is false. -# Specifies the temporary storage path for some operators when a single SQL statement exceeds the memory quota specified by mem-quota-query. -# It defaults to a generated directory in `/_tidb/` if it is unset. -# It only takes effect when `oom-use-tmp-storage` is `true`. -# tmp-storage-path = "/tmp/_tidb/MC4wLjAuMDo0MDAwLzAuMC4wLjA6MTAwODA=/tmp-storage" +## Size of the thread pool for high-priority operations. +# high-concurrency = 4 -# Specifies the maximum use of temporary storage (bytes) for all active queries when `oom-use-tmp-storage` is enabled. -# If the `tmp-storage-quota` exceeds the capacity of the temporary storage directory, tidb-server would return an error and exit. -# The default value of tmp-storage-quota is under 0 which means tidb-server wouldn't check the capacity. -tmp-storage-quota = -1 +## Size of the thread pool for normal-priority operations. +# normal-concurrency = 4 -# Specifies what operation TiDB performs when a single SQL statement exceeds the memory quota specified by mem-quota-query and cannot be spilled over to disk. -# Valid options: ["log", "cancel"] -oom-action = "cancel" +## Size of the thread pool for low-priority operations. +# low-concurrency = 4 -# Enable coprocessor streaming. 
-enable-streaming = false +## Max running high-priority operations of each worker, reject if exceeded. +# max-tasks-per-worker-high = 2000 -# Enable batch commit for the DMLs. -enable-batch-dml = false +## Max running normal-priority operations of each worker, reject if exceeded. +# max-tasks-per-worker-normal = 2000 -# Set system variable 'lower_case_table_names' -lower-case-table-names = 2 +## Max running low-priority operations of each worker, reject if exceeded. +# max-tasks-per-worker-low = 2000 -# Make "kill query" behavior compatible with MySQL. It's not recommend to -# turn on this option when TiDB server is behind a proxy. -compatible-kill-query = false +## Size of the stack for each thread in the thread pool. +# stack-size = "10MB" -# check mb4 value in utf8 is used to control whether to check the mb4 characters when the charset is utf8. -check-mb4-value-in-utf8 = true +[readpool.coprocessor] +## Whether to use the unified read pool to handle coprocessor requests. +# use-unified-pool = true -# treat-old-version-utf8-as-utf8mb4 use for upgrade compatibility. Set to true will treat old version table/column UTF8 charset as UTF8MB4. -treat-old-version-utf8-as-utf8mb4 = true +## The following configurations only take effect when `use-unified-pool` is false. -# max-index-length is used to deal with compatibility issues from v3.0.7 and previous version upgrades. It can only be in [3072, 3072*4]. -max-index-length = 3072 +## Most read requests from TiDB are sent to the coprocessor of TiKV. high/normal/low-concurrency is +## used to set the number of threads of the coprocessor. +## If there are many read requests, you can increase these config values (but keep it within the +## number of system CPU cores). For example, for a 32-core machine deployed with TiKV, you can even +## set these config to 30 in heavy read scenarios. +## If CPU_NUM > 8, the default thread pool size for coprocessors is set to CPU_NUM * 0.8. -# enable-table-lock is used to control table lock feature. Default is false, indicate the table lock feature is disabled. -enable-table-lock = false +# high-concurrency = 8 +# normal-concurrency = 8 +# low-concurrency = 8 +# max-tasks-per-worker-high = 2000 +# max-tasks-per-worker-normal = 2000 +# max-tasks-per-worker-low = 2000 -# delay-clean-table-lock is used to control the time (Milliseconds) of delay before unlock the table in the abnormal situation. -delay-clean-table-lock = 0 +[server] +## Listening address. +# addr = "127.0.0.1:20160" + +## Advertise listening address for client communication. +## If not set, `addr` will be used. +# advertise-addr = "" -# Maximum number of the splitting region, which is used by the split region statement. -split-region-max-num = 1000 - -# alter-primary-key is used to control alter primary key feature. Default is false, indicate the alter primary key feature is disabled. -# If it is true, we can add the primary key by "alter table". However, if a table already exists before the switch is turned true and the data type of its primary key column is an integer, -# the primary key cannot be dropped. -alter-primary-key = false - -# server-version is used to change the version string of TiDB in the following scenarios: -# 1. the server version returned by builtin-function `VERSION()`. -# 2. the server version filled in handshake packets of MySQL Connection Protocol, see https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake for more details. 
-# if server-version = "", the default value(original TiDB version string) is used. -server-version = "" - -# repair mode is used to repair the broken table meta in TiKV in extreme cases. -repair-mode = false - -# Repair table list is used to list the tables in repair mode with the format like ["db.table",]. -# In repair mode, repairing table which is not in repair list will get wrong database or wrong table error. -repair-table-list = [] - -# The maximum permitted number of simultaneous client connections. When the value is 0, the number of connections is unlimited. -max-server-connections = 0 - -# Whether new collations are enabled, as indicated by its name, this configuration entry take effect ONLY when a TiDB cluster bootstraps for the first time. -new_collations_enabled_on_first_bootstrap = false - -[log] -# Log level: debug, info, warn, error, fatal. -level = "info" - -# Log format, one of json, text, console. -format = "text" - -# Enable automatic timestamps in log output, if not set, it will be defaulted to true. -# enable-timestamp = true - -# Enable annotating logs with the full stack error message, if not set, it will be defaulted to false. -# enable-error-stack = false - -# Whether to enable slow query log. -enable-slow-log = true - -# Stores slow query log into separated files. -slow-query-file = "tidb-slow.log" - -# Queries with execution time greater than this value will be logged. (Milliseconds) -slow-threshold = 300 - -# record-plan-in-slow-log is used to enable record query plan in slow log. -# 0 is disable. 1 is enable. -record-plan-in-slow-log = 1 - -# Queries with internal result greater than this value will be logged. -expensive-threshold = 10000 - -# Maximum query length recorded in log. -query-log-max-len = 4096 - -# File logging. -[log.file] -# Log file name. -filename = "" - -# Max log file size in MB (upper limit to 4096MB). -max-size = 300 - -# Max log file keep days. No clean up by default. -max-days = 0 - -# Maximum number of old log files to retain. No clean up by default. -max-backups = 0 +## Status address. +## This is used for reporting the status of TiKV directly through +## the HTTP address. Notice that there is a risk of leaking status +## information if this port is exposed to the public. +## Empty string means disabling it. +# status-addr = "127.0.0.1:20180" + +## Set the maximum number of worker threads for the status report HTTP service. +# status-thread-pool-size = 1 + +## Compression type for gRPC channel: none, deflate or gzip. +# grpc-compression-type = "none" + +## Size of the thread pool for the gRPC server. +# grpc-concurrency = 4 + +## The number of max concurrent streams/requests on a client connection. +# grpc-concurrent-stream = 1024 + +## Limit the memory size can be used by gRPC. Default is unlimited. +## gRPC usually works well to reclaim memory by itself. Limit the memory in case OOM +## is observed. Note that limit the usage can lead to potential stall. +# grpc-memory-pool-quota = "32G" + +## The number of connections with each TiKV server to send Raft messages. +# grpc-raft-conn-num = 1 + +## Amount to read ahead on individual gRPC streams. +# grpc-stream-initial-window-size = "2MB" + +## Time to wait before sending out a ping to check if server is still alive. +## This is only for communications between TiKV instances. +# grpc-keepalive-time = "10s" + +## Time to wait before closing the connection without receiving KeepAlive ping Ack. +# grpc-keepalive-timeout = "3s" + +## How many snapshots can be sent concurrently. 
+# concurrent-send-snap-limit = 32 + +## How many snapshots can be received concurrently. +# concurrent-recv-snap-limit = 32 + +## Max allowed recursion level when decoding Coprocessor DAG expression. +# end-point-recursion-limit = 1000 + +## Max time to handle Coprocessor requests before timeout. +# end-point-request-max-handle-duration = "60s" + +## Max bytes that snapshot can be written to disk in one second. +## It should be set based on your disk performance. +# snap-max-write-bytes-per-sec = "100MB" + +## Whether to enable request batch. +# enable-request-batch = true + +## Whether to collect batch across commands. +## When disabled, wait duration is ignored. When enabled, collect batch for specified duration +## when load is high. +# request-batch-enable-cross-command = true + +## Wait duration before each request batch is processed. Wait is triggered when cross-command +## option is enabled and system load is high. +# request-batch-wait-duration = "1ms" + +## Attributes about this server, e.g. `{ zone = "us-west-1", disk = "ssd" }`. +# labels = {} + +[storage] +## The path to RocksDB directory. +# data-dir = "/tmp/tikv/store" + +## The number of slots in Scheduler latches, which controls write concurrency. +## In most cases you can use the default value. When importing data, you can set it to a larger +## value. +# scheduler-concurrency = 2048000 + +## Scheduler's worker pool size, i.e. the number of write threads. +## It should be less than total CPU cores. When there are frequent write operations, set it to a +## higher value. More specifically, you can run `top -H -p tikv-pid` to check whether the threads +## named `sched-worker-pool` are busy. +# scheduler-worker-pool-size = 4 + +## When the pending write bytes exceeds this threshold, the "scheduler too busy" error is displayed. +# scheduler-pending-write-threshold = "100MB" + +[storage.block-cache] +## Whether to create a shared block cache for all RocksDB column families. +## +## Block cache is used by RocksDB to cache uncompressed blocks. Big block cache can speed up read. +## It is recommended to turn on shared block cache. Since only the total cache size need to be +## set, it is easier to config. In most cases it should be able to auto-balance cache usage +## between column families with standard LRU algorithm. +## +## The rest of config in the storage.block-cache session is effective only when shared block cache +## is on. +# shared = true + +## Size of the shared block cache. Normally it should be tuned to 30%-50% of system's total memory. +## When the config is not set, it is decided by the sum of the following fields or their default +## value: +## * rocksdb.defaultcf.block-cache-size or 25% of system's total memory +## * rocksdb.writecf.block-cache-size or 15% of system's total memory +## * rocksdb.lockcf.block-cache-size or 2% of system's total memory +## * raftdb.defaultcf.block-cache-size or 2% of system's total memory +## +## To deploy multiple TiKV nodes on a single physical machine, configure this parameter explicitly. +## Otherwise, the OOM problem might occur in TiKV. +# capacity = "1GB" + +[pd] +## PD endpoints. +# endpoints = [] + +## The interval at which to retry a PD connection initialization. +## Default is 300ms. +# retry-interval = "300ms" + +## If the client observes an error, it can can skip reporting it except every `n` times. +## Set to 1 to disable this feature. +## Default is 10. +# retry-log-every = 10 + +## The maximum number of times to retry a PD connection initialization. 
+## Set to 0 to disable retry. +## Default is -1, meaning isize::MAX times. +# retry-max-count = -1 + +[raftstore] +## Whether to force to flush logs. +## Set to `true` (default) for best reliability, which prevents data loss when there is a power +## failure. Set to `false` for higher performance (ensure that you run multiple TiKV nodes!). +# sync-log = true + +## Whether to enable Raft prevote. +## Prevote minimizes disruption when a partitioned node rejoins the cluster by using a two phase +## election. +# prevote = true + +## The path to RaftDB directory. +## If not set, it will be `{data-dir}/raft`. +## If there are multiple disks on the machine, storing the data of Raft RocksDB on differen disks +## can improve TiKV performance. +# raftdb-path = "" + +## Store capacity, i.e. max data size allowed. +## If it is not set, disk capacity is used. +# capacity = 0 + +## Internal notify capacity. +## 40960 is suitable for about 7000 Regions. It is recommended to use the default value. +# notify-capacity = 40960 + +## Maximum number of internal messages to process in a tick. +# messages-per-tick = 4096 + +## Region heartbeat tick interval for reporting to PD. +# pd-heartbeat-tick-interval = "60s" + +## Store heartbeat tick interval for reporting to PD. +# pd-store-heartbeat-tick-interval = "10s" + +## The threshold of triggering Region split check. +## When Region size change exceeds this config, TiKV will check whether the Region should be split +## or not. To reduce the cost of scanning data in the checking process, you can set the value to +## 32MB during checking and set it back to the default value in normal operations. +# region-split-check-diff = "6MB" + +## The interval of triggering Region split check. +# split-region-check-tick-interval = "10s" + +## When the number of Raft entries exceeds the max size, TiKV rejects to propose the entry. +# raft-entry-max-size = "8MB" + +## Interval to GC unnecessary Raft log. +# raft-log-gc-tick-interval = "10s" + +## Threshold to GC stale Raft log, must be >= 1. +# raft-log-gc-threshold = 50 + +## When the entry count exceeds this value, GC will be forced to trigger. +# raft-log-gc-count-limit = 72000 + +## When the approximate size of Raft log entries exceeds this value, GC will be forced trigger. +## It's recommanded to set it to 3/4 of `region-split-size`. +# raft-log-gc-size-limit = "72MB" + +## How long the peer will be considered down and reported to PD when it hasn't been active for this +## time. +# max-peer-down-duration = "5m" + +## Interval to check whether to start manual compaction for a Region. +# region-compact-check-interval = "5m" + +## Number of Regions for each time to check. +# region-compact-check-step = 100 + +## The minimum number of delete tombstones to trigger manual compaction. +# region-compact-min-tombstones = 10000 + +## The minimum percentage of delete tombstones to trigger manual compaction. +## It should be set between 1 and 100. Manual compaction is only triggered when the number of +## delete tombstones exceeds `region-compact-min-tombstones` and the percentage of delete tombstones +## exceeds `region-compact-tombstones-percent`. +# region-compact-tombstones-percent = 30 + +## Interval to check whether to start a manual compaction for Lock Column Family. +## If written bytes reach `lock-cf-compact-bytes-threshold` for Lock Column Family, TiKV will +## trigger a manual compaction for Lock Column Family. 
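+## Worked reading of the defaults below (illustrative): the Lock Column Family is checked every
+## 10 minutes, and a manual compaction is triggered only when roughly 256MB of writes have
+## accumulated in it.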
+# lock-cf-compact-interval = "10m" +# lock-cf-compact-bytes-threshold = "256MB" + +## Interval (s) to check Region whether the data are consistent. +# consistency-check-interval = 0 + +## Delay time before deleting a stale peer. +# clean-stale-peer-delay = "10m" + +## Interval to clean up import SST files. +# cleanup-import-sst-interval = "10m" + +## Use how many threads to handle log apply +# apply-pool-size = 2 + +## Use how many threads to handle raft messages +# store-pool-size = 2 + +[coprocessor] +## When it is set to `true`, TiKV will try to split a Region with table prefix if that Region +## crosses tables. +## It is recommended to turn off this option if there will be a large number of tables created. +# split-region-on-table = false + +## One split check produces several split keys in batch. This config limits the number of produced +## split keys in one batch. +# batch-split-limit = 10 + +## When Region [a,e) size exceeds `region_max_size`, it will be split into several Regions [a,b), +## [b,c), [c,d), [d,e) and the size of [a,b), [b,c), [c,d) will be `region_split_size` (or a +## little larger). +# region-max-size = "144MB" +# region-split-size = "96MB" + +## When the number of keys in Region [a,e) exceeds the `region_max_keys`, it will be split into +## several Regions [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b), [b,c), [c,d) will be +## `region_split_keys`. +# region-max-keys = 1440000 +# region-split-keys = 960000 + +[rocksdb] +## Maximum number of threads of RocksDB background jobs. +## The background tasks include compaction and flush. For detailed information why RocksDB needs to +## do compaction, see RocksDB-related materials. RocksDB will adjust flush and compaction threads +## according to the formula: +## max_flushes = max_flushes = max(1, max_background_jobs / 4) +## max_compactions = max(1, max_background_jobs - max_flushes) +## When write traffic (like the importing data size) is big, it is recommended to enable more +## threads. But set the number of the enabled threads smaller than that of CPU cores. For example, +## when importing data, for a machine with a 32-core CPU, set the value to 28. +## The default value is set to 8 or CPU_NUM - 1, whichever is smaller. +# max-background-jobs = 8 + +## Represents the maximum number of threads that will concurrently perform a sub-compaction job by +## breaking it into multiple, smaller ones running simultaneously. +## The default value is set to 3 or the largest number to allow for two compactions, whichever is +## smaller. +# max-sub-compactions = 3 + +## Number of open files that can be used by the DB. +## Value -1 means files opened are always kept open and RocksDB will prefetch index and filter +## blocks into block cache at startup. So if your database has a large working set, it will take +## several minutes to open the DB. You may need to increase this if your database has a large +## working set. You can estimate the number of files based on `target-file-size-base` and +## `target_file_size_multiplier` for level-based compaction. +# max-open-files = 40960 + +## Max size of RocksDB's MANIFEST file. +## For detailed explanation, please refer to https://github.com/facebook/rocksdb/wiki/MANIFEST +# max-manifest-file-size = "128MB" + +## If the value is `true`, the database will be created if it is missing. +# create-if-missing = true + +## RocksDB Write-Ahead Logs (WAL) recovery mode. 
+## 0 : TolerateCorruptedTailRecords, tolerate incomplete record in trailing data on all logs; +## 1 : AbsoluteConsistency, We don't expect to find any corruption in the WAL; +## 2 : PointInTimeRecovery, Recover to point-in-time consistency; +## 3 : SkipAnyCorruptedRecords, Recovery after a disaster; +# wal-recovery-mode = 2 + +## RocksDB WAL directory. +## This config specifies the absolute directory path for WAL. +## If it is not set, the log files will be in the same directory as data. When you set the path to +## RocksDB directory in memory like in `/dev/shm`, you may want to set`wal-dir` to a directory on a +## persistent storage. See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database . +## If there are two disks on the machine, storing RocksDB data and WAL logs on different disks can +## improve performance. +# wal-dir = "/tmp/tikv/store" + +## The following two fields affect how archived WAL will be deleted. +## 1. If both values are set to 0, logs will be deleted ASAP and will not get into the archive. +## 2. If `wal-ttl-seconds` is 0 and `wal-size-limit` is not 0, WAL files will be checked every 10 +## min and if total size is greater than `wal-size-limit`, they will be deleted starting with the +## earliest until `wal-size-limit` is met. All empty files will be deleted. +## 3. If `wal-ttl-seconds` is not 0 and `wal-size-limit` is 0, then WAL files will be checked every +## `wal-ttl-seconds / 2` and those that are older than `wal-ttl-seconds` will be deleted. +## 4. If both are not 0, WAL files will be checked every 10 min and both checks will be performed +## with ttl being first. +## When you set the path to RocksDB directory in memory like in `/dev/shm`, you may want to set +## `wal-ttl-seconds` to a value greater than 0 (like 86400) and backup your DB on a regular basis. +## See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database . +# wal-ttl-seconds = 0 +# wal-size-limit = 0 + +## Max RocksDB WAL size in total +# max-total-wal-size = "4GB" + +## RocksDB Statistics provides cumulative stats over time. +## Turning statistics on will introduce about 5%-10% overhead for RocksDB, but it can help you to +## know the internal status of RocksDB. +# enable-statistics = true + +## Dump statistics periodically in information logs. +## Same as RocksDB's default value (10 min). +# stats-dump-period = "10m" + +## Refer to: https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ +## If you want to use RocksDB on multi disks or spinning disks, you should set value at least 2MB. +# compaction-readahead-size = 0 + +## Max buffer size that is used by WritableFileWrite. +# writable-file-max-buffer-size = "1MB" + +## Use O_DIRECT for both reads and writes in background flush and compactions. +# use-direct-io-for-flush-and-compaction = false + +## Limit the disk IO of compaction and flush. +## Compaction and flush can cause terrible spikes if they exceed a certain threshold. Consider +## setting this to 50% ~ 80% of the disk throughput for a more stable result. However, in heavy +## write workload, limiting compaction and flush speed can cause write stalls too. +## 1. rate-bytes-per-sec is the only parameter you want to set most of the time. It controls the +## total write rate of compaction and flush in bytes per second. Currently, RocksDB does not +## enforce rate limit for anything other than flush and compaction, e.g. write to WAL. +## 2. rate-limiter-mode indicates which types of operations count against the limit. 
+## 1 : ReadOnly +## 2 : WriteOnly +## 3 : AllIo +## 3. auto_tuned enables dynamic adjustment of rate limit within the range +## [rate_bytes_per_sec / 20, rate_bytes_per_sec], according to the recent demand for background I/O. +# rate-bytes-per-sec = 0 +# rate-limiter-mode = 2 +# auto-tuned = false + + +## Enable or disable the pipelined write. +# enable-pipelined-write = true + +## Allows OS to incrementally sync files to disk while they are being written, asynchronously, +## in the background. +# bytes-per-sync = "1MB" + +## Allows OS to incrementally sync WAL to disk while it is being written. +# wal-bytes-per-sync = "512KB" + +## Specify the maximal size of the RocksDB info log file. +## If the log file is larger than this config, a new info log file will be created. +## If it is set to 0, all logs will be written to one log file. +# info-log-max-size = "1GB" + +## Time for the RocksDB info log file to roll (in seconds). +## If the log file has been active longer than this config, it will be rolled. +## If it is set to 0, rolling will be disabled. +# info-log-roll-time = "0" + +## Maximal RocksDB info log files to be kept. +# info-log-keep-log-file-num = 10 + +## Specifies the RocksDB info log directory. +## If it is empty, the log files will be in the same directory as data. +## If it is not empty, the log files will be in the specified directory, and the DB data directory's +## absolute path will be used as the log file name's prefix. +# info-log-dir = "" + +## Options for `Titan`. +[rocksdb.titan] +## Enables or disables `Titan`. Note that Titan is still an experimental feature. Once +## enabled, it can't fall back. Forced fallback may result in data loss. +## default: false +# enabled = false + +## Maximum number of threads of `Titan` background gc jobs. +# default: 4 +# max-background-gc = 4 + +## Options for "Default" Column Family, which stores actual user data. +[rocksdb.defaultcf] +## Compression method (if any) is used to compress a block. +## no: kNoCompression +## snappy: kSnappyCompression +## zlib: kZlibCompression +## bzip2: kBZip2Compression +## lz4: kLZ4Compression +## lz4hc: kLZ4HCCompression +## zstd: kZSTD +## `lz4` is a compression algorithm with moderate speed and compression ratio. The compression +## ratio of `zlib` is high. It is friendly to the storage space, but its compression speed is +## slow. This compression occupies many CPU resources. + +## Per level compression. +## This config should be chosen carefully according to CPU and I/O resources. For example, if you +## use the compression mode of "no:no:lz4:lz4:lz4:zstd:zstd" and find much I/O pressure of the +## system (run the `iostat` command to find %util lasts 100%, or run the `top` command to find many +## iowaits) when writing (importing) a lot of data while the CPU resources are adequate, you can +## compress level-0 and level-1 and exchange CPU resources for I/O resources. If you use the +## compression mode of "no:no:lz4:lz4:lz4:zstd:zstd" and you find the I/O pressure of the system is +## not big when writing a lot of data, but CPU resources are inadequate. Then run the `top` command +## and choose the `-H` option. If you find a lot of bg threads (namely the compression thread of +## RocksDB) are running, you can exchange I/O resources for CPU resources and change the compression +## mode to "no:no:no:lz4:lz4:zstd:zstd". In a word, it aims at making full use of the existing +## resources of the system and improving TiKV performance in terms of the current resources. 
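+## For example, the CPU-saving mode mentioned above is written in TOML as:
+## compression-per-level = ["no", "no", "no", "lz4", "lz4", "zstd", "zstd"]
+## whereas the default below starts compressing from level-2.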
+# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] + +## The data block size. RocksDB compresses data based on the unit of block. +## Similar to page in other databases, block is the smallest unit cached in block-cache. Note that +## the block size specified here corresponds to uncompressed data. +# block-size = "64KB" + +## If you're doing point lookups you definitely want to turn bloom filters on. We use bloom filters +## to avoid unnecessary disk reads. Default bits_per_key is 10, which yields ~1% false positive +## rate. Larger `bloom-filter-bits-per-key` values will reduce false positive rate, but increase +## memory usage and space amplification. +# bloom-filter-bits-per-key = 10 + +## `false` means one SST file one bloom filter, `true` means every block has a corresponding bloom +## filter. +# block-based-bloom-filter = false + +# level0-file-num-compaction-trigger = 4 + +## Soft limit on number of level-0 files. +## When the number of SST files of level-0 reaches the limit of `level0-slowdown-writes-trigger`, +## RocksDB tries to slow down the write operation, because too many SST files of level-0 can cause +## higher read pressure of RocksDB. +# level0-slowdown-writes-trigger = 20 + +## Maximum number of level-0 files. +## When the number of SST files of level-0 reaches the limit of `level0-stop-writes-trigger`, +## RocksDB stalls the new write operation. +# level0-stop-writes-trigger = 36 + +## Amount of data to build up in memory (backed by an unsorted log on disk) before converting to a +## sorted on-disk file. It is the RocksDB MemTable size. +# write-buffer-size = "128MB" + +## The maximum number of the MemTables. The data written into RocksDB is first recorded in the WAL +## log, and then inserted into MemTables. When the MemTable reaches the size limit of +## `write-buffer-size`, it turns into read only and generates a new MemTable receiving new write +## operations. The flush threads of RocksDB will flush the read only MemTable to the disks to become +## an SST file of level0. `max-background-flushes` controls the maximum number of flush threads. +## When the flush threads are busy, resulting in the number of the MemTables waiting to be flushed +## to the disks reaching the limit of `max-write-buffer-number`, RocksDB stalls the new operation. +## "Stall" is a flow control mechanism of RocksDB. When importing data, you can set the +## `max-write-buffer-number` value higher, like 10. +# max-write-buffer-number = 5 + +## The minimum number of write buffers that will be merged together before writing to storage. +# min-write-buffer-number-to-merge = 1 + +## Control maximum total data size for base level (level 1). +## When the level-1 data size reaches the limit value of `max-bytes-for-level-base`, the SST files +## of level-1 and their overlap SST files of level-2 will be compacted. The golden rule: the first +## reference principle of setting `max-bytes-for-level-base` is guaranteeing that the +## `max-bytes-for-level-base` value is roughly equal to the data volume of level-0. Thus +## unnecessary compaction is reduced. For example, if the compression mode is +## "no:no:lz4:lz4:lz4:lz4:lz4", the `max-bytes-for-level-base` value can be `write-buffer-size * 4`, +## because there is no compression of level-0 and level-1 and the trigger condition of compaction +## for level-0 is that the number of the SST files reaches 4 (the default value). 
When both level-0 +## and level-1 adopt compaction, it is necessary to analyze RocksDB logs to know the size of an SST +## file compressed from a MemTable. For example, if the file size is 32MB, the proposed value of +## `max-bytes-for-level-base` is 32MB * 4 = 128MB. +# max-bytes-for-level-base = "512MB" + +## Target file size for compaction. +## The SST file size of level-0 is influenced by the compaction algorithm of `write-buffer-size` +## and level0. `target-file-size-base` is used to control the size of a single SST file of level1 to +## level6. +# target-file-size-base = "8MB" + +## Max bytes for `compaction.max_compaction_bytes`. +# max-compaction-bytes = "2GB" + +## There are four different compaction priorities. +## 0 : ByCompensatedSize +## 1 : OldestLargestSeqFirst +## 2 : OldestSmallestSeqFirst +## 3 : MinOverlappingRatio +# compaction-pri = 3 + +## Indicating if we'd put index/filter blocks to the block cache. +## If not specified, each "table reader" object will pre-load index/filter block during table +## initialization. +# cache-index-and-filter-blocks = true + +## Pin level-0 filter and index blocks in cache. +# pin-l0-filter-and-index-blocks = true + +## Enable read amplification statistics. +## value => memory usage (percentage of loaded blocks memory) +## 1 => 12.50 % +## 2 => 06.25 % +## 4 => 03.12 % +## 8 => 01.56 % +## 16 => 00.78 % +# read-amp-bytes-per-bit = 0 + +## Pick target size of each level dynamically. +# dynamic-level-bytes = true + +## Optimizes bloom filters. If true, RocksDB won't create bloom filters for the max level of +## the LSM to reduce metadata that should fit in RAM. +## This value is setted to true for `default` cf by default because its kv data could be determined +## whether really exists by upper logic instead of bloom filters. But we suggest to set it to false +## while using `Raw` mode. +# optimize-filters-for-hits = true + +## Options for "Default" Column Family for `Titan`. +[rocksdb.defaultcf.titan] +## The smallest value to store in blob files. Value smaller than +## this threshold will be inlined in base DB. +## default: 1KB +# min-blob-size = "1KB" + +## The compression algorithm used to compress data in blob files. +## Compression method. +## no: kNoCompression +## snappy: kSnappyCompression +## zlib: kZlibCompression +## bzip2: kBZip2Compression +## lz4: kLZ4Compression +## lz4hc: kLZ4HCCompression +## zstd: kZSTD +# default: lz4 +# blob-file-compression = "lz4" + +## Specifics cache size for blob records +# default: 0 +# blob-cache-size = "0GB" + +## If the ratio of discardable size of a blob file is larger than +## this threshold, the blob file will be GCed out. +# default: 0.5 +# discardable-ratio = 0.5 + +## The mode used to process blob files. In read-only mode Titan +## stops writing value into blob log. In fallback mode Titan +## converts blob index into real value on flush and compaction. +## This option is especially useful for downgrading Titan. +## default: kNormal +## read-only: kReadOnly +## fallback: kFallback +# default: normal +# blob-run-mode = "normal" + +## If set true, values in blob file will be merged to a new blob file while +## their corresponding keys are compacted to last two level in LSM-Tree. +## +## With this feature enabled, Titan could get better scan performance, and +## better write performance during GC, but will suffer around 1.1 space +## amplification and 3 more write amplification if no GC needed (eg. uniformly +## distributed keys) under default rocksdb setting. 
+## +## Requirement: level_compaction_dynamic_level_base = true +## default: false +# level_merge = false + +## Use merge operator to rewrite GC blob index. +## default: false +# gc-merge-rewrite = false + +## Options for "Write" Column Family, which stores MVCC commit information +[rocksdb.writecf] +## Recommend to set it the same as `rocksdb.defaultcf.compression-per-level`. +# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] +# block-size = "64KB" + +## Recommend to set it the same as `rocksdb.defaultcf.write-buffer-size`. +# write-buffer-size = "128MB" +# max-write-buffer-number = 5 +# min-write-buffer-number-to-merge = 1 + +## Recommend to set it the same as `rocksdb.defaultcf.max-bytes-for-level-base`. +# max-bytes-for-level-base = "512MB" +# target-file-size-base = "8MB" + +# level0-file-num-compaction-trigger = 4 +# level0-slowdown-writes-trigger = 20 +# level0-stop-writes-trigger = 36 +# cache-index-and-filter-blocks = true +# pin-l0-filter-and-index-blocks = true +# compaction-pri = 3 +# read-amp-bytes-per-bit = 0 +# dynamic-level-bytes = true +# optimize-filters-for-hits = false + +[rocksdb.lockcf] +# compression-per-level = ["no", "no", "no", "no", "no", "no", "no"] +# block-size = "16KB" +# write-buffer-size = "32MB" +# max-write-buffer-number = 5 +# min-write-buffer-number-to-merge = 1 +# max-bytes-for-level-base = "128MB" +# target-file-size-base = "8MB" +# level0-file-num-compaction-trigger = 1 +# level0-slowdown-writes-trigger = 20 +# level0-stop-writes-trigger = 36 +# cache-index-and-filter-blocks = true +# pin-l0-filter-and-index-blocks = true +# compaction-pri = 0 +# read-amp-bytes-per-bit = 0 +# dynamic-level-bytes = true +# optimize-filters-for-hits = false + +[raftdb] +# max-background-jobs = 4 +# max-sub-compactions = 2 +# max-open-files = 40960 +# max-manifest-file-size = "20MB" +# create-if-missing = true + +# enable-statistics = true +# stats-dump-period = "10m" + +# compaction-readahead-size = 0 +# writable-file-max-buffer-size = "1MB" +# use-direct-io-for-flush-and-compaction = false +# enable-pipelined-write = true +# allow-concurrent-memtable-write = false +# bytes-per-sync = "1MB" +# wal-bytes-per-sync = "512KB" + +# info-log-max-size = "1GB" +# info-log-roll-time = "0" +# info-log-keep-log-file-num = 10 +# info-log-dir = "" +# optimize-filters-for-hits = true + +[raftdb.defaultcf] +## Recommend to set it the same as `rocksdb.defaultcf.compression-per-level`. +# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] +# block-size = "64KB" + +## Recommend to set it the same as `rocksdb.defaultcf.write-buffer-size`. +# write-buffer-size = "128MB" +# max-write-buffer-number = 5 +# min-write-buffer-number-to-merge = 1 + +## Recommend to set it the same as `rocksdb.defaultcf.max-bytes-for-level-base`. +# max-bytes-for-level-base = "512MB" +# target-file-size-base = "8MB" + +# level0-file-num-compaction-trigger = 4 +# level0-slowdown-writes-trigger = 20 +# level0-stop-writes-trigger = 36 +# cache-index-and-filter-blocks = true +# pin-l0-filter-and-index-blocks = true +# compaction-pri = 0 +# read-amp-bytes-per-bit = 0 +# dynamic-level-bytes = true +# optimize-filters-for-hits = true [security] -# Path of file that contains list of trusted SSL CAs for connection with mysql client. -ssl-ca = "" - -# Path of file that contains X509 certificate in PEM format for connection with mysql client. -ssl-cert = "" - -# Path of file that contains X509 key in PEM format for connection with mysql client. 
-ssl-key = "" - -# Path of file that contains list of trusted SSL CAs for connection with cluster components. -cluster-ssl-ca = "" - -# Path of file that contains X509 certificate in PEM format for connection with cluster components. -cluster-ssl-cert = "" - -# Path of file that contains X509 key in PEM format for connection with cluster components. -cluster-ssl-key = "" - -[status] -# If enable status report HTTP service. -report-status = true - -# TiDB status host. -status-host = "0.0.0.0" - -## status-host is the HTTP address for reporting the internal status of a TiDB server, for example: -## API for prometheus: http://${status-host}:${status_port}/metrics -## API for pprof: http://${status-host}:${status_port}/debug/pprof -# TiDB status port. -status-port = 10080 - -# Prometheus pushgateway address, leaves it empty will disable push to pushgateway. -metrics-addr = "" - -# Prometheus client push interval in second, set \"0\" to disable push to pushgateway. -metrics-interval = 15 - -# Record statements qps by database name if it is enabled. -record-db-qps = false - -[performance] -# Max CPUs to use, 0 use number of CPUs in the machine. -max-procs = 0 - -# Max memory size to use, 0 use the total usable memory in the machine. -max-memory = 0 - -# StmtCountLimit limits the max count of statement inside a transaction. -stmt-count-limit = 5000 - -# Set keep alive option for tcp connection. -tcp-keep-alive = true - -# Whether support cartesian product. -cross-join = true - -# Stats lease duration, which influences the time of analyze and stats load. -stats-lease = "3s" - -# Run auto analyze worker on this tidb-server. -run-auto-analyze = true - -# Probability to use the query feedback to update stats, 0.0 or 1.0 for always false/true. -feedback-probability = 0.05 - -# The max number of query feedback that cache in memory. -query-feedback-limit = 1024 - -# Pseudo stats will be used if the ratio between the modify count and -# row count in statistics of a table is greater than it. -pseudo-estimate-ratio = 0.8 - -# Force the priority of all statements in a specified priority. -# The value could be "NO_PRIORITY", "LOW_PRIORITY", "HIGH_PRIORITY" or "DELAYED". -force-priority = "NO_PRIORITY" - -# Bind info lease duration, which influences the duration of loading bind info and handling invalid bind. -bind-info-lease = "3s" - -# Whether support pushing down aggregation with distinct to cop task -distinct-agg-push-down = false - -# The limitation of the size in byte for the entries in one transaction. -# If using TiKV as the storage, the entry represents a key/value pair. -# NOTE: If binlog is enabled with Kafka (e.g. arbiter cluster), -# this value should be less than 1073741824(1G) because this is the maximum size that can be handled by Kafka. -# If binlog is disabled or binlog is enabled without Kafka, this value should be less than 10737418240(10G). -txn-total-size-limit = 104857600 - -# The max number of running concurrency two phase committer request for an SQL. -committer-concurrency = 16 - -# max lifetime of transaction ttl manager. -max-txn-ttl = 600000 - -[proxy-protocol] -# PROXY protocol acceptable client networks. -# Empty string means disable PROXY protocol, * means all networks. -networks = "" - -# PROXY protocol header read timeout, unit is second -header-timeout = 5 - -[prepared-plan-cache] -enabled = false -capacity = 100 -memory-guard-ratio = 0.1 - -[opentracing] -# Enable opentracing. -enable = false - -# Whether to enable the rpc metrics. 
-rpc-metrics = false - -[opentracing.sampler] -# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote -type = "const" - -# Param is a value passed to the sampler. -# Valid values for Param field are: -# - for "const" sampler, 0 or 1 for always false/true respectively -# - for "probabilistic" sampler, a probability between 0 and 1 -# - for "rateLimiting" sampler, the number of spans per second -# - for "remote" sampler, param is the same as for "probabilistic" -# and indicates the initial sampling rate before the actual one -# is received from the mothership -param = 1.0 - -# SamplingServerURL is the address of jaeger-agent's HTTP sampling server -sampling-server-url = "" - -# MaxOperations is the maximum number of operations that the sampler -# will keep track of. If an operation is not tracked, a default probabilistic -# sampler will be used rather than the per operation specific sampler. -max-operations = 0 - -# SamplingRefreshInterval controls how often the remotely controlled sampler will poll -# jaeger-agent for the appropriate sampling strategy. -sampling-refresh-interval = 0 - -[opentracing.reporter] -# QueueSize controls how many spans the reporter can keep in memory before it starts dropping -# new spans. The queue is continuously drained by a background go-routine, as fast as spans -# can be sent out of process. -queue-size = 0 - -# BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full. -# It is generally not useful, as it only matters for very low traffic services. -buffer-flush-interval = 0 - -# LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter -# and logs all submitted spans. Main Configuration.Logger must be initialized in the code -# for this option to have any effect. -log-spans = false - -# LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address -local-agent-host-port = "" - -[tikv-client] -# Max gRPC connections that will be established with each tikv-server. -grpc-connection-count = 4 - -# After a duration of this time in seconds if the client doesn't see any activity it pings -# the server to see if the transport is still alive. -grpc-keepalive-time = 10 - -# After having pinged for keepalive check, the client waits for a duration of Timeout in seconds -# and if no activity is seen even after that the connection is closed. -grpc-keepalive-timeout = 3 - -# Max time for commit command, must be twice bigger than raft election timeout. -commit-timeout = "41s" - -# Max batch size in gRPC. -max-batch-size = 128 -# Overload threshold of TiKV. -overload-threshold = 200 -# Max batch wait time in nanosecond to avoid waiting too long. 0 means disable this feature. -max-batch-wait-time = 0 -# Batch wait size, to avoid waiting too long. -batch-wait-size = 8 - -# Enable chunk encoded data for coprocessor requests. -enable-chunk-rpc = true - -# If a Region has not been accessed for more than the given duration (in seconds), it -# will be reloaded from the PD. -region-cache-ttl = 600 - -# store-limit is used to restrain TiDB from sending request to some stores which is up to the limit. -# If a store has been up to the limit, it will return error for the successive request in same store. -# default 0 means shutting off store limit. -store-limit = 0 - -# store-liveness-timeout is used to control timeout for store liveness after sending request failed. -store-liveness-timeout = "120s" - -[tikv-client.copr-cache] -# Whether to enable the copr cache. 
The copr cache saves the result from TiKV Coprocessor in the memory and -# reuses the result when corresponding data in TiKV is unchanged, on a region basis. -enabled = true - -# The capacity in MB of the cache. -capacity-mb = 1000.0 - -# Only cache requests whose result set is small. -admission-max-result-mb = 10.0 -# Only cache requests takes notable time to process. -admission-min-process-ms = 5 - -[binlog] -# enable to write binlog. -# NOTE: If binlog is enabled with Kafka (e.g. arbiter cluster), -# txn-total-size-limit should be less than 1073741824(1G) because this is the maximum size that can be handled by Kafka. -enable = false - -# WriteTimeout specifies how long it will wait for writing binlog to pump. -write-timeout = "15s" - -# If IgnoreError is true, when writing binlog meets error, TiDB would stop writing binlog, -# but still provide service. -ignore-error = false - -# use socket file to write binlog, for compatible with kafka version tidb-binlog. -binlog-socket = "" - -# the strategy for sending binlog to pump, value can be "range" or "hash" now. -strategy = "range" +## The path for TLS certificates. Empty string means disabling secure connections. +# ca-path = "" +# cert-path = "" +# key-path = "" +# cert-allowed-cn = [] + +# Configurations for encryption at rest. Experimental. +[security.encryption] +## Encryption method to use for data files. +## Possible values are "plaintext", "aes128-ctr", "aes192-ctr" and "aes256-ctr". Value other than +## "plaintext" means encryption is enabled, in which case master key must be specified. +# data-encryption-method = "plaintext" + +## Specifies how often TiKV rotates data encryption key. +# data-key-rotation-period = "7d" + +## Specifies master key if encryption is enabled. There are three types of master key: +## +## * "plaintext": +## +## Plaintext as master key means no master key is given and only applicable when +## encryption is not enabled, i.e. data-encryption-method = "plaintext". This type doesn't +## have sub-config items. Example: +## +## [security.encryption.master-key] +## type = "plaintext" +## +## * "kms": +## +## Use a KMS service to supply master key. Currently only AWS KMS is supported. This type of +## master key is recommended for production use. Example: +## +## [security.encryption.master-key] +## type = "kms" +## ## KMS CMK key id. Must be a valid KMS CMK where the TiKV process has access to. +## ## In production is recommended to grant access of the CMK to TiKV using IAM. +## key-id = "1234abcd-12ab-34cd-56ef-1234567890ab" +## ## AWS region of the KMS CMK. +## region = "us-west-2" +## ## (Optional) AWS KMS service endpoint. Only required when non-default KMS endpoint is +## ## desired. +## endpoint = "https://kms.us-west-2.amazonaws.com" +## +## * "file": +## +## Supply a custom encryption key stored in a file. It is recommended NOT to use in production, +## as it breaks the purpose of encryption at rest, unless the file is stored in tempfs. +## The file must contain a 256-bits (32 bytes, regardless of key length implied by +## data-encryption-method) key encoded as hex string and end with newline ("\n"). Example: +## +## [security.encryption.master-key] +## type = "file" +## path = "/path/to/master/key/file" +## +[security.encryption.master-key] +# type = "plaintext" + +## Specifies the old master key when rotating master key. Same config format as master-key. +## The key is only access once during TiKV startup, after that TiKV do not need access to the key. 
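Pulling the encryption-at-rest pieces above together: a plausible rotation from a file-based master key to a KMS-backed one could be sketched as below (the key id, region, and file path are the illustrative values quoted in the comments above, not real ones); the [security.encryption.previous-master-key] section this relies on is described immediately after this.

    [security.encryption]
    data-encryption-method = "aes256-ctr"
    data-key-rotation-period = "7d"

    # new master key served by AWS KMS
    [security.encryption.master-key]
    type = "kms"
    key-id = "1234abcd-12ab-34cd-56ef-1234567890ab"
    region = "us-west-2"

    # the key that was in use before the rotation
    [security.encryption.previous-master-key]
    type = "file"
    path = "/path/to/master/key/file"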
+## And it is okay to leave the stale previous-master-key config after master key rotation. +[security.encryption.previous-master-key] +# type = "plaintext" + +[import] +## Number of threads to handle RPC requests. +# num-threads = 8 + +## Stream channel window size, stream will be blocked on channel full. +# stream-channel-window = 128 [pessimistic-txn] -# enable pessimistic transaction. -enable = true - -# max retry count for a statement in a pessimistic transaction. -max-retry-count = 256 - -[stmt-summary] -# enable statement summary. -enable = true - -# enable statement summary for TiDB internal query, default is false. -enable-internal-query = false - -# max number of statements kept in memory. -max-stmt-count = 200 - -# max length of displayed normalized sql and sample sql. -max-sql-length = 4096 - -# the refresh interval of statement summary, it's counted in seconds. -refresh-interval = 1800 - -# the maximum history size of statement summary. -history-size = 24 - -# experimental section controls the features that are still experimental: their semantics, -# interfaces are subject to change, using these features in the production environment is not recommended. -[experimental] -# enable column attribute `auto_random` to be defined on the primary key column. -allow-auto-random = false -# enable creating expression index. -allow-expression-index = false - -# server level isolation read by engines and labels -[isolation-read] -# engines means allow the tidb server read data from which types of engines. options: "tikv", "tiflash", "tidb". -engines = ["tikv", "tiflash", "tidb"] \ No newline at end of file +## Enable pessimistic transaction +# enabled = true + +## The default and maximum delay before responding to TiDB when pessimistic +## transactions encounter locks +# wait-for-lock-timeout = "1s" + +## If more than one transaction is waiting for the same lock, only the one with smallest +## start timestamp will be waked up immediately when the lock is released. Others will +## be waked up after `wake_up_delay_duration` to reduce contention and make the oldest +## one more likely acquires the lock. +# wake-up-delay-duration = "20ms" + +## Enable pipelined pessimistic lock, only effect when processing perssimistic transactions +## Enabled this will improve performance, but slightly increase the transcation failure rate +# pipelined = false + +[gc] +## The number of keys to GC in one batch. +# batch-keys = 512 + +## Max bytes that GC worker can write to rocksdb in one second. +## If it is set to 0, there is no limit. +# max-write-bytes-per-sec = "0" \ No newline at end of file From e2fe60638469c351869d6feaf81022abcbec05cc Mon Sep 17 00:00:00 2001 From: Song Gao <2695690803@qq.com> Date: Wed, 27 May 2020 14:06:13 +0800 Subject: [PATCH 3/4] Update config.config --- cc/config.config | 964 +++++------------------------------------------ 1 file changed, 102 insertions(+), 862 deletions(-) diff --git a/cc/config.config b/cc/config.config index 80292805ba..75f2cd2794 100644 --- a/cc/config.config +++ b/cc/config.config @@ -1,870 +1,110 @@ -## TiKV config template -## Human-readable big numbers: -## File size(based on byte): KB, MB, GB, TB, PB -## e.g.: 1_048_576 = "1MB" -## Time(based on ms): ms, s, m, h -## e.g.: 78_000 = "1.3m" +# PD Configuration. -## Log levels: trace, debug, info, warning, error, critical. -## Note that `debug` and `trace` are only available in development builds. -# log-level = "info" +name = "pd" +data-dir = "default.pd" -## File to store logs. 
-## If it is not set, logs will be appended to stderr. -# log-file = "" +client-urls = "http://127.0.0.1:2379" +## if not set, use ${client-urls} +advertise-client-urls = "" -## File to store slow logs. -## If "log-file" is set, but this is not set, the slow logs will be appeneded -## to "log-file". If both "log-file" and "slow-log-file" are not set, all logs -## will be appended to stderr. -# slow-log-file = "" +peer-urls = "http://127.0.0.1:2380" +## if not set, use ${peer-urls} +advertise-peer-urls = "" -## The minimum operation cost to output relative logs. -# slow-log-threshold = "1s" +initial-cluster = "pd=http://127.0.0.1:2380" +initial-cluster-state = "new" -## Timespan between rotating the log files. -## Once this timespan passes, log files will be rotated, i.e. existing log file will have a -## timestamp appended to its name and a new file will be created. -# log-rotation-timespan = "24h" +lease = 3 +tso-save-interval = "3s" -## Size of log file that triggers the log rotation. -## Once the size of log file exceeds the threshold value, the log file will be rotated -## and place the old log file in a new file named by orginal file name subbfixed by a timestamp. -# log-rotation-size = "300MB" - -# Configurations for the single thread pool serving read requests. -[readpool.unified] -## The minimal working thread count of the thread pool. -# min-thread-count = 1 - -## The maximum working thread count of the thread pool. -## The default value is max(4, LOGICAL_CPU_NUM * 0.8). -# max-thread-count = 8 - -## Size of the stack for each thread in the thread pool. -# stack-size = "10MB" - -## Max running tasks of each worker, reject if exceeded. -# max-tasks-per-worker = 2000 - -[readpool.storage] -## Whether to use the unified read pool to handle storage requests. -# use-unified-pool = false - -## The following configurations only take effect when `use-unified-pool` is false. - -## Size of the thread pool for high-priority operations. -# high-concurrency = 4 - -## Size of the thread pool for normal-priority operations. -# normal-concurrency = 4 - -## Size of the thread pool for low-priority operations. -# low-concurrency = 4 - -## Max running high-priority operations of each worker, reject if exceeded. -# max-tasks-per-worker-high = 2000 - -## Max running normal-priority operations of each worker, reject if exceeded. -# max-tasks-per-worker-normal = 2000 - -## Max running low-priority operations of each worker, reject if exceeded. -# max-tasks-per-worker-low = 2000 - -## Size of the stack for each thread in the thread pool. -# stack-size = "10MB" - -[readpool.coprocessor] -## Whether to use the unified read pool to handle coprocessor requests. -# use-unified-pool = true - -## The following configurations only take effect when `use-unified-pool` is false. - -## Most read requests from TiDB are sent to the coprocessor of TiKV. high/normal/low-concurrency is -## used to set the number of threads of the coprocessor. -## If there are many read requests, you can increase these config values (but keep it within the -## number of system CPU cores). For example, for a 32-core machine deployed with TiKV, you can even -## set these config to 30 in heavy read scenarios. -## If CPU_NUM > 8, the default thread pool size for coprocessors is set to CPU_NUM * 0.8. - -# high-concurrency = 8 -# normal-concurrency = 8 -# low-concurrency = 8 -# max-tasks-per-worker-high = 2000 -# max-tasks-per-worker-normal = 2000 -# max-tasks-per-worker-low = 2000 - -[server] -## Listening address. 
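The three read pool blocks above can be folded into the unified pool; a sketch using the commented defaults (the thread counts are not tuning advice):

    [readpool.unified]
    min-thread-count = 1
    max-thread-count = 8
    stack-size = "10MB"
    max-tasks-per-worker = 2000

    [readpool.storage]
    use-unified-pool = true

    [readpool.coprocessor]
    use-unified-pool = true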
-# addr = "127.0.0.1:20160" - -## Advertise listening address for client communication. -## If not set, `addr` will be used. -# advertise-addr = "" - -## Status address. -## This is used for reporting the status of TiKV directly through -## the HTTP address. Notice that there is a risk of leaking status -## information if this port is exposed to the public. -## Empty string means disabling it. -# status-addr = "127.0.0.1:20180" - -## Set the maximum number of worker threads for the status report HTTP service. -# status-thread-pool-size = 1 - -## Compression type for gRPC channel: none, deflate or gzip. -# grpc-compression-type = "none" - -## Size of the thread pool for the gRPC server. -# grpc-concurrency = 4 - -## The number of max concurrent streams/requests on a client connection. -# grpc-concurrent-stream = 1024 - -## Limit the memory size can be used by gRPC. Default is unlimited. -## gRPC usually works well to reclaim memory by itself. Limit the memory in case OOM -## is observed. Note that limit the usage can lead to potential stall. -# grpc-memory-pool-quota = "32G" - -## The number of connections with each TiKV server to send Raft messages. -# grpc-raft-conn-num = 1 - -## Amount to read ahead on individual gRPC streams. -# grpc-stream-initial-window-size = "2MB" - -## Time to wait before sending out a ping to check if server is still alive. -## This is only for communications between TiKV instances. -# grpc-keepalive-time = "10s" - -## Time to wait before closing the connection without receiving KeepAlive ping Ack. -# grpc-keepalive-timeout = "3s" - -## How many snapshots can be sent concurrently. -# concurrent-send-snap-limit = 32 - -## How many snapshots can be received concurrently. -# concurrent-recv-snap-limit = 32 - -## Max allowed recursion level when decoding Coprocessor DAG expression. -# end-point-recursion-limit = 1000 - -## Max time to handle Coprocessor requests before timeout. -# end-point-request-max-handle-duration = "60s" - -## Max bytes that snapshot can be written to disk in one second. -## It should be set based on your disk performance. -# snap-max-write-bytes-per-sec = "100MB" - -## Whether to enable request batch. -# enable-request-batch = true - -## Whether to collect batch across commands. -## When disabled, wait duration is ignored. When enabled, collect batch for specified duration -## when load is high. -# request-batch-enable-cross-command = true - -## Wait duration before each request batch is processed. Wait is triggered when cross-command -## option is enabled and system load is high. -# request-batch-wait-duration = "1ms" - -## Attributes about this server, e.g. `{ zone = "us-west-1", disk = "ssd" }`. -# labels = {} - -[storage] -## The path to RocksDB directory. -# data-dir = "/tmp/tikv/store" - -## The number of slots in Scheduler latches, which controls write concurrency. -## In most cases you can use the default value. When importing data, you can set it to a larger -## value. -# scheduler-concurrency = 2048000 - -## Scheduler's worker pool size, i.e. the number of write threads. -## It should be less than total CPU cores. When there are frequent write operations, set it to a -## higher value. More specifically, you can run `top -H -p tikv-pid` to check whether the threads -## named `sched-worker-pool` are busy. -# scheduler-worker-pool-size = 4 - -## When the pending write bytes exceeds this threshold, the "scheduler too busy" error is displayed. 
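For the data-import scenario mentioned in the scheduler comments above, both knobs are typically raised together; a hypothetical sketch (the worker count assumes a machine with noticeably more CPU cores than that):

    [storage]
    # larger latch space during bulk import, per the scheduler-concurrency note above
    scheduler-concurrency = 4096000
    # keep this below the machine's total CPU core count
    scheduler-worker-pool-size = 8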
-# scheduler-pending-write-threshold = "100MB" - -[storage.block-cache] -## Whether to create a shared block cache for all RocksDB column families. -## -## Block cache is used by RocksDB to cache uncompressed blocks. Big block cache can speed up read. -## It is recommended to turn on shared block cache. Since only the total cache size need to be -## set, it is easier to config. In most cases it should be able to auto-balance cache usage -## between column families with standard LRU algorithm. -## -## The rest of config in the storage.block-cache session is effective only when shared block cache -## is on. -# shared = true - -## Size of the shared block cache. Normally it should be tuned to 30%-50% of system's total memory. -## When the config is not set, it is decided by the sum of the following fields or their default -## value: -## * rocksdb.defaultcf.block-cache-size or 25% of system's total memory -## * rocksdb.writecf.block-cache-size or 15% of system's total memory -## * rocksdb.lockcf.block-cache-size or 2% of system's total memory -## * raftdb.defaultcf.block-cache-size or 2% of system's total memory -## -## To deploy multiple TiKV nodes on a single physical machine, configure this parameter explicitly. -## Otherwise, the OOM problem might occur in TiKV. -# capacity = "1GB" - -[pd] -## PD endpoints. -# endpoints = [] - -## The interval at which to retry a PD connection initialization. -## Default is 300ms. -# retry-interval = "300ms" - -## If the client observes an error, it can can skip reporting it except every `n` times. -## Set to 1 to disable this feature. -## Default is 10. -# retry-log-every = 10 - -## The maximum number of times to retry a PD connection initialization. -## Set to 0 to disable retry. -## Default is -1, meaning isize::MAX times. -# retry-max-count = -1 - -[raftstore] -## Whether to force to flush logs. -## Set to `true` (default) for best reliability, which prevents data loss when there is a power -## failure. Set to `false` for higher performance (ensure that you run multiple TiKV nodes!). -# sync-log = true - -## Whether to enable Raft prevote. -## Prevote minimizes disruption when a partitioned node rejoins the cluster by using a two phase -## election. -# prevote = true - -## The path to RaftDB directory. -## If not set, it will be `{data-dir}/raft`. -## If there are multiple disks on the machine, storing the data of Raft RocksDB on differen disks -## can improve TiKV performance. -# raftdb-path = "" - -## Store capacity, i.e. max data size allowed. -## If it is not set, disk capacity is used. -# capacity = 0 - -## Internal notify capacity. -## 40960 is suitable for about 7000 Regions. It is recommended to use the default value. -# notify-capacity = 40960 - -## Maximum number of internal messages to process in a tick. -# messages-per-tick = 4096 - -## Region heartbeat tick interval for reporting to PD. -# pd-heartbeat-tick-interval = "60s" - -## Store heartbeat tick interval for reporting to PD. -# pd-store-heartbeat-tick-interval = "10s" - -## The threshold of triggering Region split check. -## When Region size change exceeds this config, TiKV will check whether the Region should be split -## or not. To reduce the cost of scanning data in the checking process, you can set the value to -## 32MB during checking and set it back to the default value in normal operations. -# region-split-check-diff = "6MB" - -## The interval of triggering Region split check. 
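As an example of the "configure capacity explicitly when several TiKV nodes share one machine" advice above: assuming a hypothetical 64 GB host running two TiKV instances, roughly 40% of memory split evenly gives about 12 GB per instance:

    [storage.block-cache]
    shared = true
    # ~40% of 64 GB divided between two instances (illustrative, not a recommendation)
    capacity = "12GB"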
-# split-region-check-tick-interval = "10s" - -## When the number of Raft entries exceeds the max size, TiKV rejects to propose the entry. -# raft-entry-max-size = "8MB" - -## Interval to GC unnecessary Raft log. -# raft-log-gc-tick-interval = "10s" - -## Threshold to GC stale Raft log, must be >= 1. -# raft-log-gc-threshold = 50 - -## When the entry count exceeds this value, GC will be forced to trigger. -# raft-log-gc-count-limit = 72000 - -## When the approximate size of Raft log entries exceeds this value, GC will be forced trigger. -## It's recommanded to set it to 3/4 of `region-split-size`. -# raft-log-gc-size-limit = "72MB" - -## How long the peer will be considered down and reported to PD when it hasn't been active for this -## time. -# max-peer-down-duration = "5m" - -## Interval to check whether to start manual compaction for a Region. -# region-compact-check-interval = "5m" - -## Number of Regions for each time to check. -# region-compact-check-step = 100 - -## The minimum number of delete tombstones to trigger manual compaction. -# region-compact-min-tombstones = 10000 - -## The minimum percentage of delete tombstones to trigger manual compaction. -## It should be set between 1 and 100. Manual compaction is only triggered when the number of -## delete tombstones exceeds `region-compact-min-tombstones` and the percentage of delete tombstones -## exceeds `region-compact-tombstones-percent`. -# region-compact-tombstones-percent = 30 - -## Interval to check whether to start a manual compaction for Lock Column Family. -## If written bytes reach `lock-cf-compact-bytes-threshold` for Lock Column Family, TiKV will -## trigger a manual compaction for Lock Column Family. -# lock-cf-compact-interval = "10m" -# lock-cf-compact-bytes-threshold = "256MB" - -## Interval (s) to check Region whether the data are consistent. -# consistency-check-interval = 0 - -## Delay time before deleting a stale peer. -# clean-stale-peer-delay = "10m" - -## Interval to clean up import SST files. -# cleanup-import-sst-interval = "10m" - -## Use how many threads to handle log apply -# apply-pool-size = 2 - -## Use how many threads to handle raft messages -# store-pool-size = 2 - -[coprocessor] -## When it is set to `true`, TiKV will try to split a Region with table prefix if that Region -## crosses tables. -## It is recommended to turn off this option if there will be a large number of tables created. -# split-region-on-table = false - -## One split check produces several split keys in batch. This config limits the number of produced -## split keys in one batch. -# batch-split-limit = 10 - -## When Region [a,e) size exceeds `region_max_size`, it will be split into several Regions [a,b), -## [b,c), [c,d), [d,e) and the size of [a,b), [b,c), [c,d) will be `region_split_size` (or a -## little larger). -# region-max-size = "144MB" -# region-split-size = "96MB" - -## When the number of keys in Region [a,e) exceeds the `region_max_keys`, it will be split into -## several Regions [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b), [b,c), [c,d) will be -## `region_split_keys`. -# region-max-keys = 1440000 -# region-split-keys = 960000 - -[rocksdb] -## Maximum number of threads of RocksDB background jobs. -## The background tasks include compaction and flush. For detailed information why RocksDB needs to -## do compaction, see RocksDB-related materials. 
RocksDB will adjust flush and compaction threads -## according to the formula: -## max_flushes = max_flushes = max(1, max_background_jobs / 4) -## max_compactions = max(1, max_background_jobs - max_flushes) -## When write traffic (like the importing data size) is big, it is recommended to enable more -## threads. But set the number of the enabled threads smaller than that of CPU cores. For example, -## when importing data, for a machine with a 32-core CPU, set the value to 28. -## The default value is set to 8 or CPU_NUM - 1, whichever is smaller. -# max-background-jobs = 8 - -## Represents the maximum number of threads that will concurrently perform a sub-compaction job by -## breaking it into multiple, smaller ones running simultaneously. -## The default value is set to 3 or the largest number to allow for two compactions, whichever is -## smaller. -# max-sub-compactions = 3 - -## Number of open files that can be used by the DB. -## Value -1 means files opened are always kept open and RocksDB will prefetch index and filter -## blocks into block cache at startup. So if your database has a large working set, it will take -## several minutes to open the DB. You may need to increase this if your database has a large -## working set. You can estimate the number of files based on `target-file-size-base` and -## `target_file_size_multiplier` for level-based compaction. -# max-open-files = 40960 - -## Max size of RocksDB's MANIFEST file. -## For detailed explanation, please refer to https://github.com/facebook/rocksdb/wiki/MANIFEST -# max-manifest-file-size = "128MB" - -## If the value is `true`, the database will be created if it is missing. -# create-if-missing = true - -## RocksDB Write-Ahead Logs (WAL) recovery mode. -## 0 : TolerateCorruptedTailRecords, tolerate incomplete record in trailing data on all logs; -## 1 : AbsoluteConsistency, We don't expect to find any corruption in the WAL; -## 2 : PointInTimeRecovery, Recover to point-in-time consistency; -## 3 : SkipAnyCorruptedRecords, Recovery after a disaster; -# wal-recovery-mode = 2 - -## RocksDB WAL directory. -## This config specifies the absolute directory path for WAL. -## If it is not set, the log files will be in the same directory as data. When you set the path to -## RocksDB directory in memory like in `/dev/shm`, you may want to set`wal-dir` to a directory on a -## persistent storage. See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database . -## If there are two disks on the machine, storing RocksDB data and WAL logs on different disks can -## improve performance. -# wal-dir = "/tmp/tikv/store" - -## The following two fields affect how archived WAL will be deleted. -## 1. If both values are set to 0, logs will be deleted ASAP and will not get into the archive. -## 2. If `wal-ttl-seconds` is 0 and `wal-size-limit` is not 0, WAL files will be checked every 10 -## min and if total size is greater than `wal-size-limit`, they will be deleted starting with the -## earliest until `wal-size-limit` is met. All empty files will be deleted. -## 3. If `wal-ttl-seconds` is not 0 and `wal-size-limit` is 0, then WAL files will be checked every -## `wal-ttl-seconds / 2` and those that are older than `wal-ttl-seconds` will be deleted. -## 4. If both are not 0, WAL files will be checked every 10 min and both checks will be performed -## with ttl being first. 
-## When you set the path to RocksDB directory in memory like in `/dev/shm`, you may want to set -## `wal-ttl-seconds` to a value greater than 0 (like 86400) and backup your DB on a regular basis. -## See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database . -# wal-ttl-seconds = 0 -# wal-size-limit = 0 - -## Max RocksDB WAL size in total -# max-total-wal-size = "4GB" - -## RocksDB Statistics provides cumulative stats over time. -## Turning statistics on will introduce about 5%-10% overhead for RocksDB, but it can help you to -## know the internal status of RocksDB. -# enable-statistics = true - -## Dump statistics periodically in information logs. -## Same as RocksDB's default value (10 min). -# stats-dump-period = "10m" - -## Refer to: https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ -## If you want to use RocksDB on multi disks or spinning disks, you should set value at least 2MB. -# compaction-readahead-size = 0 - -## Max buffer size that is used by WritableFileWrite. -# writable-file-max-buffer-size = "1MB" - -## Use O_DIRECT for both reads and writes in background flush and compactions. -# use-direct-io-for-flush-and-compaction = false - -## Limit the disk IO of compaction and flush. -## Compaction and flush can cause terrible spikes if they exceed a certain threshold. Consider -## setting this to 50% ~ 80% of the disk throughput for a more stable result. However, in heavy -## write workload, limiting compaction and flush speed can cause write stalls too. -## 1. rate-bytes-per-sec is the only parameter you want to set most of the time. It controls the -## total write rate of compaction and flush in bytes per second. Currently, RocksDB does not -## enforce rate limit for anything other than flush and compaction, e.g. write to WAL. -## 2. rate-limiter-mode indicates which types of operations count against the limit. -## 1 : ReadOnly -## 2 : WriteOnly -## 3 : AllIo -## 3. auto_tuned enables dynamic adjustment of rate limit within the range -## [rate_bytes_per_sec / 20, rate_bytes_per_sec], according to the recent demand for background I/O. -# rate-bytes-per-sec = 0 -# rate-limiter-mode = 2 -# auto-tuned = false - - -## Enable or disable the pipelined write. -# enable-pipelined-write = true - -## Allows OS to incrementally sync files to disk while they are being written, asynchronously, -## in the background. -# bytes-per-sync = "1MB" - -## Allows OS to incrementally sync WAL to disk while it is being written. -# wal-bytes-per-sync = "512KB" - -## Specify the maximal size of the RocksDB info log file. -## If the log file is larger than this config, a new info log file will be created. -## If it is set to 0, all logs will be written to one log file. -# info-log-max-size = "1GB" - -## Time for the RocksDB info log file to roll (in seconds). -## If the log file has been active longer than this config, it will be rolled. -## If it is set to 0, rolling will be disabled. -# info-log-roll-time = "0" - -## Maximal RocksDB info log files to be kept. -# info-log-keep-log-file-num = 10 - -## Specifies the RocksDB info log directory. -## If it is empty, the log files will be in the same directory as data. -## If it is not empty, the log files will be in the specified directory, and the DB data directory's -## absolute path will be used as the log file name's prefix. -# info-log-dir = "" - -## Options for `Titan`. -[rocksdb.titan] -## Enables or disables `Titan`. Note that Titan is still an experimental feature. Once -## enabled, it can't fall back. 
Forced fallback may result in data loss. -## default: false -# enabled = false - -## Maximum number of threads of `Titan` background gc jobs. -# default: 4 -# max-background-gc = 4 - -## Options for "Default" Column Family, which stores actual user data. -[rocksdb.defaultcf] -## Compression method (if any) is used to compress a block. -## no: kNoCompression -## snappy: kSnappyCompression -## zlib: kZlibCompression -## bzip2: kBZip2Compression -## lz4: kLZ4Compression -## lz4hc: kLZ4HCCompression -## zstd: kZSTD -## `lz4` is a compression algorithm with moderate speed and compression ratio. The compression -## ratio of `zlib` is high. It is friendly to the storage space, but its compression speed is -## slow. This compression occupies many CPU resources. - -## Per level compression. -## This config should be chosen carefully according to CPU and I/O resources. For example, if you -## use the compression mode of "no:no:lz4:lz4:lz4:zstd:zstd" and find much I/O pressure of the -## system (run the `iostat` command to find %util lasts 100%, or run the `top` command to find many -## iowaits) when writing (importing) a lot of data while the CPU resources are adequate, you can -## compress level-0 and level-1 and exchange CPU resources for I/O resources. If you use the -## compression mode of "no:no:lz4:lz4:lz4:zstd:zstd" and you find the I/O pressure of the system is -## not big when writing a lot of data, but CPU resources are inadequate. Then run the `top` command -## and choose the `-H` option. If you find a lot of bg threads (namely the compression thread of -## RocksDB) are running, you can exchange I/O resources for CPU resources and change the compression -## mode to "no:no:no:lz4:lz4:zstd:zstd". In a word, it aims at making full use of the existing -## resources of the system and improving TiKV performance in terms of the current resources. -# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] - -## The data block size. RocksDB compresses data based on the unit of block. -## Similar to page in other databases, block is the smallest unit cached in block-cache. Note that -## the block size specified here corresponds to uncompressed data. -# block-size = "64KB" - -## If you're doing point lookups you definitely want to turn bloom filters on. We use bloom filters -## to avoid unnecessary disk reads. Default bits_per_key is 10, which yields ~1% false positive -## rate. Larger `bloom-filter-bits-per-key` values will reduce false positive rate, but increase -## memory usage and space amplification. -# bloom-filter-bits-per-key = 10 - -## `false` means one SST file one bloom filter, `true` means every block has a corresponding bloom -## filter. -# block-based-bloom-filter = false - -# level0-file-num-compaction-trigger = 4 - -## Soft limit on number of level-0 files. -## When the number of SST files of level-0 reaches the limit of `level0-slowdown-writes-trigger`, -## RocksDB tries to slow down the write operation, because too many SST files of level-0 can cause -## higher read pressure of RocksDB. -# level0-slowdown-writes-trigger = 20 - -## Maximum number of level-0 files. -## When the number of SST files of level-0 reaches the limit of `level0-stop-writes-trigger`, -## RocksDB stalls the new write operation. -# level0-stop-writes-trigger = 36 - -## Amount of data to build up in memory (backed by an unsorted log on disk) before converting to a -## sorted on-disk file. It is the RocksDB MemTable size. -# write-buffer-size = "128MB" - -## The maximum number of the MemTables. 
The data written into RocksDB is first recorded in the WAL -## log, and then inserted into MemTables. When the MemTable reaches the size limit of -## `write-buffer-size`, it turns into read only and generates a new MemTable receiving new write -## operations. The flush threads of RocksDB will flush the read only MemTable to the disks to become -## an SST file of level0. `max-background-flushes` controls the maximum number of flush threads. -## When the flush threads are busy, resulting in the number of the MemTables waiting to be flushed -## to the disks reaching the limit of `max-write-buffer-number`, RocksDB stalls the new operation. -## "Stall" is a flow control mechanism of RocksDB. When importing data, you can set the -## `max-write-buffer-number` value higher, like 10. -# max-write-buffer-number = 5 - -## The minimum number of write buffers that will be merged together before writing to storage. -# min-write-buffer-number-to-merge = 1 - -## Control maximum total data size for base level (level 1). -## When the level-1 data size reaches the limit value of `max-bytes-for-level-base`, the SST files -## of level-1 and their overlap SST files of level-2 will be compacted. The golden rule: the first -## reference principle of setting `max-bytes-for-level-base` is guaranteeing that the -## `max-bytes-for-level-base` value is roughly equal to the data volume of level-0. Thus -## unnecessary compaction is reduced. For example, if the compression mode is -## "no:no:lz4:lz4:lz4:lz4:lz4", the `max-bytes-for-level-base` value can be `write-buffer-size * 4`, -## because there is no compression of level-0 and level-1 and the trigger condition of compaction -## for level-0 is that the number of the SST files reaches 4 (the default value). When both level-0 -## and level-1 adopt compaction, it is necessary to analyze RocksDB logs to know the size of an SST -## file compressed from a MemTable. For example, if the file size is 32MB, the proposed value of -## `max-bytes-for-level-base` is 32MB * 4 = 128MB. -# max-bytes-for-level-base = "512MB" - -## Target file size for compaction. -## The SST file size of level-0 is influenced by the compaction algorithm of `write-buffer-size` -## and level0. `target-file-size-base` is used to control the size of a single SST file of level1 to -## level6. -# target-file-size-base = "8MB" - -## Max bytes for `compaction.max_compaction_bytes`. -# max-compaction-bytes = "2GB" - -## There are four different compaction priorities. -## 0 : ByCompensatedSize -## 1 : OldestLargestSeqFirst -## 2 : OldestSmallestSeqFirst -## 3 : MinOverlappingRatio -# compaction-pri = 3 - -## Indicating if we'd put index/filter blocks to the block cache. -## If not specified, each "table reader" object will pre-load index/filter block during table -## initialization. -# cache-index-and-filter-blocks = true - -## Pin level-0 filter and index blocks in cache. -# pin-l0-filter-and-index-blocks = true - -## Enable read amplification statistics. -## value => memory usage (percentage of loaded blocks memory) -## 1 => 12.50 % -## 2 => 06.25 % -## 4 => 03.12 % -## 8 => 01.56 % -## 16 => 00.78 % -# read-amp-bytes-per-bit = 0 - -## Pick target size of each level dynamically. -# dynamic-level-bytes = true - -## Optimizes bloom filters. If true, RocksDB won't create bloom filters for the max level of -## the LSM to reduce metadata that should fit in RAM. 
-## This value is setted to true for `default` cf by default because its kv data could be determined -## whether really exists by upper logic instead of bloom filters. But we suggest to set it to false -## while using `Raw` mode. -# optimize-filters-for-hits = true - -## Options for "Default" Column Family for `Titan`. -[rocksdb.defaultcf.titan] -## The smallest value to store in blob files. Value smaller than -## this threshold will be inlined in base DB. -## default: 1KB -# min-blob-size = "1KB" - -## The compression algorithm used to compress data in blob files. -## Compression method. -## no: kNoCompression -## snappy: kSnappyCompression -## zlib: kZlibCompression -## bzip2: kBZip2Compression -## lz4: kLZ4Compression -## lz4hc: kLZ4HCCompression -## zstd: kZSTD -# default: lz4 -# blob-file-compression = "lz4" - -## Specifics cache size for blob records -# default: 0 -# blob-cache-size = "0GB" - -## If the ratio of discardable size of a blob file is larger than -## this threshold, the blob file will be GCed out. -# default: 0.5 -# discardable-ratio = 0.5 - -## The mode used to process blob files. In read-only mode Titan -## stops writing value into blob log. In fallback mode Titan -## converts blob index into real value on flush and compaction. -## This option is especially useful for downgrading Titan. -## default: kNormal -## read-only: kReadOnly -## fallback: kFallback -# default: normal -# blob-run-mode = "normal" - -## If set true, values in blob file will be merged to a new blob file while -## their corresponding keys are compacted to last two level in LSM-Tree. -## -## With this feature enabled, Titan could get better scan performance, and -## better write performance during GC, but will suffer around 1.1 space -## amplification and 3 more write amplification if no GC needed (eg. uniformly -## distributed keys) under default rocksdb setting. -## -## Requirement: level_compaction_dynamic_level_base = true -## default: false -# level_merge = false - -## Use merge operator to rewrite GC blob index. -## default: false -# gc-merge-rewrite = false - -## Options for "Write" Column Family, which stores MVCC commit information -[rocksdb.writecf] -## Recommend to set it the same as `rocksdb.defaultcf.compression-per-level`. -# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] -# block-size = "64KB" - -## Recommend to set it the same as `rocksdb.defaultcf.write-buffer-size`. -# write-buffer-size = "128MB" -# max-write-buffer-number = 5 -# min-write-buffer-number-to-merge = 1 - -## Recommend to set it the same as `rocksdb.defaultcf.max-bytes-for-level-base`. 
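Following the "set it the same as rocksdb.defaultcf" recommendations in this write CF section, an explicit mirrored override might look like the sketch below; the values are simply the commented defaults quoted above and just below, not tuning advice:

    [rocksdb.writecf]
    compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"]
    write-buffer-size = "128MB"
    max-bytes-for-level-base = "512MB"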
-# max-bytes-for-level-base = "512MB" -# target-file-size-base = "8MB" - -# level0-file-num-compaction-trigger = 4 -# level0-slowdown-writes-trigger = 20 -# level0-stop-writes-trigger = 36 -# cache-index-and-filter-blocks = true -# pin-l0-filter-and-index-blocks = true -# compaction-pri = 3 -# read-amp-bytes-per-bit = 0 -# dynamic-level-bytes = true -# optimize-filters-for-hits = false - -[rocksdb.lockcf] -# compression-per-level = ["no", "no", "no", "no", "no", "no", "no"] -# block-size = "16KB" -# write-buffer-size = "32MB" -# max-write-buffer-number = 5 -# min-write-buffer-number-to-merge = 1 -# max-bytes-for-level-base = "128MB" -# target-file-size-base = "8MB" -# level0-file-num-compaction-trigger = 1 -# level0-slowdown-writes-trigger = 20 -# level0-stop-writes-trigger = 36 -# cache-index-and-filter-blocks = true -# pin-l0-filter-and-index-blocks = true -# compaction-pri = 0 -# read-amp-bytes-per-bit = 0 -# dynamic-level-bytes = true -# optimize-filters-for-hits = false - -[raftdb] -# max-background-jobs = 4 -# max-sub-compactions = 2 -# max-open-files = 40960 -# max-manifest-file-size = "20MB" -# create-if-missing = true - -# enable-statistics = true -# stats-dump-period = "10m" - -# compaction-readahead-size = 0 -# writable-file-max-buffer-size = "1MB" -# use-direct-io-for-flush-and-compaction = false -# enable-pipelined-write = true -# allow-concurrent-memtable-write = false -# bytes-per-sync = "1MB" -# wal-bytes-per-sync = "512KB" - -# info-log-max-size = "1GB" -# info-log-roll-time = "0" -# info-log-keep-log-file-num = 10 -# info-log-dir = "" -# optimize-filters-for-hits = true - -[raftdb.defaultcf] -## Recommend to set it the same as `rocksdb.defaultcf.compression-per-level`. -# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] -# block-size = "64KB" - -## Recommend to set it the same as `rocksdb.defaultcf.write-buffer-size`. -# write-buffer-size = "128MB" -# max-write-buffer-number = 5 -# min-write-buffer-number-to-merge = 1 - -## Recommend to set it the same as `rocksdb.defaultcf.max-bytes-for-level-base`. -# max-bytes-for-level-base = "512MB" -# target-file-size-base = "8MB" - -# level0-file-num-compaction-trigger = 4 -# level0-slowdown-writes-trigger = 20 -# level0-stop-writes-trigger = 36 -# cache-index-and-filter-blocks = true -# pin-l0-filter-and-index-blocks = true -# compaction-pri = 0 -# read-amp-bytes-per-bit = 0 -# dynamic-level-bytes = true -# optimize-filters-for-hits = true +enable-prevote = true [security] -## The path for TLS certificates. Empty string means disabling secure connections. -# ca-path = "" -# cert-path = "" -# key-path = "" -# cert-allowed-cn = [] - -# Configurations for encryption at rest. Experimental. -[security.encryption] -## Encryption method to use for data files. -## Possible values are "plaintext", "aes128-ctr", "aes192-ctr" and "aes256-ctr". Value other than -## "plaintext" means encryption is enabled, in which case master key must be specified. -# data-encryption-method = "plaintext" - -## Specifies how often TiKV rotates data encryption key. -# data-key-rotation-period = "7d" - -## Specifies master key if encryption is enabled. There are three types of master key: -## -## * "plaintext": -## -## Plaintext as master key means no master key is given and only applicable when -## encryption is not enabled, i.e. data-encryption-method = "plaintext". This type doesn't -## have sub-config items. 
Example: -## -## [security.encryption.master-key] -## type = "plaintext" -## -## * "kms": -## -## Use a KMS service to supply master key. Currently only AWS KMS is supported. This type of -## master key is recommended for production use. Example: -## -## [security.encryption.master-key] -## type = "kms" -## ## KMS CMK key id. Must be a valid KMS CMK where the TiKV process has access to. -## ## In production is recommended to grant access of the CMK to TiKV using IAM. -## key-id = "1234abcd-12ab-34cd-56ef-1234567890ab" -## ## AWS region of the KMS CMK. -## region = "us-west-2" -## ## (Optional) AWS KMS service endpoint. Only required when non-default KMS endpoint is -## ## desired. -## endpoint = "https://kms.us-west-2.amazonaws.com" -## -## * "file": -## -## Supply a custom encryption key stored in a file. It is recommended NOT to use in production, -## as it breaks the purpose of encryption at rest, unless the file is stored in tempfs. -## The file must contain a 256-bits (32 bytes, regardless of key length implied by -## data-encryption-method) key encoded as hex string and end with newline ("\n"). Example: -## -## [security.encryption.master-key] -## type = "file" -## path = "/path/to/master/key/file" -## -[security.encryption.master-key] -# type = "plaintext" - -## Specifies the old master key when rotating master key. Same config format as master-key. -## The key is only access once during TiKV startup, after that TiKV do not need access to the key. -## And it is okay to leave the stale previous-master-key config after master key rotation. -[security.encryption.previous-master-key] -# type = "plaintext" - -[import] -## Number of threads to handle RPC requests. -# num-threads = 8 - -## Stream channel window size, stream will be blocked on channel full. -# stream-channel-window = 128 - -[pessimistic-txn] -## Enable pessimistic transaction -# enabled = true - -## The default and maximum delay before responding to TiDB when pessimistic -## transactions encounter locks -# wait-for-lock-timeout = "1s" - -## If more than one transaction is waiting for the same lock, only the one with smallest -## start timestamp will be waked up immediately when the lock is released. Others will -## be waked up after `wake_up_delay_duration` to reduce contention and make the oldest -## one more likely acquires the lock. -# wake-up-delay-duration = "20ms" - -## Enable pipelined pessimistic lock, only effect when processing perssimistic transactions -## Enabled this will improve performance, but slightly increase the transcation failure rate -# pipelined = false - -[gc] -## The number of keys to GC in one batch. -# batch-keys = 512 - -## Max bytes that GC worker can write to rocksdb in one second. -## If it is set to 0, there is no limit. -# max-write-bytes-per-sec = "0" \ No newline at end of file +## Path of file that contains list of trusted SSL CAs. if set, following four settings shouldn't be empty +cacert-path = "" +## Path of file that contains X509 certificate in PEM format. +cert-path = "" +## Path of file that contains X509 key in PEM format. 
+key-path = ""
+
+cert-allowed-cn = ["example.com"]
+
+[log]
+level = "info"
+
+## log format, one of json, text, console
+# format = "text"
+
+## disable automatic timestamps in output
+# disable-timestamp = false
+
+# file logging
+[log.file]
+# filename = ""
+## max log file size in MB
+# max-size = 300
+## max log file keep days
+# max-days = 28
+## maximum number of old log files to retain
+# max-backups = 7
+
+[metric]
+## prometheus client push interval, set "0s" to disable prometheus.
+interval = "15s"
+## prometheus pushgateway address, leaving it empty disables push to the pushgateway.
+address = ""
+
+[pd-server]
+## the metric storage is the cluster metric storage. It is used for querying metric data.
+## Currently we use prometheus as the metric storage; we may use PD/TiKV as the metric storage later.
+## For usability, it is recommended to temporarily set it to the prometheus address, e.g. http://127.0.0.1:9090
+metric-storage = ""
+
+[schedule]
+max-merge-region-size = 20
+max-merge-region-keys = 200000
+split-merge-interval = "1h"
+max-snapshot-count = 3
+max-pending-peer-count = 16
+max-store-down-time = "30m"
+leader-schedule-limit = 4
+region-schedule-limit = 2048
+replica-schedule-limit = 64
+merge-schedule-limit = 8
+hot-region-schedule-limit = 4
+## There are some policies supported: ["count", "size"], default: "count"
+# leader-schedule-policy = "count"
+## When the score difference between the leaders or Regions of two stores is
+## less than the specified multiple of the Region size, PD considers them in balance.
+## If it equals 0.0, PD will automatically adjust it.
+# tolerant-size-ratio = 0.0
+
+## These parameters control the merge scheduler behavior.
+## If it is true, a Region can only be merged into the Region that follows it.
+# enable-one-way-merge = false
+## If it is true, two Regions within different tables can be merged.
+## This option only works when the key type is "table".
+# enable-cross-table-merge = false
+
+## customized schedulers; the format is as below
+## if empty, balance-leader, balance-region, and hot-region are used by default
+# [[schedule.schedulers]]
+# type = "evict-leader"
+# args = ["1"]
+
+[replication]
+## The number of replicas for each region.
+max-replicas = 3
+## The label keys specify the location of a store.
+## The placement priority is implied by the order of the label keys.
+## For example, ["zone", "rack"] means that we should place replicas to
+## different zones first, then to different racks if we don't have enough zones.
+location-labels = []
+## Strictly checks whether the TiKV labels match the location labels.
+# strictly-match-label = false
+
+[label-property]
+## Do not assign region leaders to stores that have these tags.
+# [[label-property.reject-leader]]
+# key = "zone"
+# value = "cn1
\ No newline at end of file
From 02c4186d4e4ea9698efc0f3cb7f28fa35ffe42c8 Mon Sep 17 00:00:00 2001
From: Song Gao <2695690803@qq.com>
Date: Wed, 27 May 2020 14:14:47 +0800
Subject: [PATCH 4/4] update pd config

---
 cc/config.config                              | 110 ------------------
 docs/api-references/docs.md                   |  17 ++-
 pkg/apis/pingcap/v1alpha1/pd_config.go        |   8 +-
 .../pingcap/v1alpha1/zz_generated.deepcopy.go |   5 +
 4 files changed, 24 insertions(+), 116 deletions(-)
 delete mode 100644 cc/config.config

diff --git a/cc/config.config b/cc/config.config
deleted file mode 100644
index 75f2cd2794..0000000000
--- a/cc/config.config
+++ /dev/null
@@ -1,110 +0,0 @@
-# PD Configuration.
- -name = "pd" -data-dir = "default.pd" - -client-urls = "http://127.0.0.1:2379" -## if not set, use ${client-urls} -advertise-client-urls = "" - -peer-urls = "http://127.0.0.1:2380" -## if not set, use ${peer-urls} -advertise-peer-urls = "" - -initial-cluster = "pd=http://127.0.0.1:2380" -initial-cluster-state = "new" - -lease = 3 -tso-save-interval = "3s" - -enable-prevote = true - -[security] -## Path of file that contains list of trusted SSL CAs. if set, following four settings shouldn't be empty -cacert-path = "" -## Path of file that contains X509 certificate in PEM format. -cert-path = "" -## Path of file that contains X509 key in PEM format. -key-path = "" - -cert-allowed-cn = ["example.com"] - -[log] -level = "info" - -## log format, one of json, text, console -# format = "text" - -## disable automatic timestamps in output -# disable-timestamp = false - -# file logging -[log.file] -# filename = "" -## max log file size in MB -# max-size = 300 -## max log file keep days -# max-days = 28 -## maximum number of old log files to retain -# max-backups = 7 - -[metric] -## prometheus client push interval, set "0s" to disable prometheus. -interval = "15s" -## prometheus pushgateway address, leaves it empty will disable prometheus. -address = "" - -[pd-server] -## the metric storage is the cluster metric storage. This is use for query metric data. -## Currently we use prometheus as metric storage, we may use PD/TiKV as metric storage later. -## For usability, recommended to temporarily set it to the prometheus address, eg: http://127.0.0.1:9090 -metric-storage = "" - -[schedule] -max-merge-region-size = 20 -max-merge-region-keys = 200000 -split-merge-interval = "1h" -max-snapshot-count = 3 -max-pending-peer-count = 16 -max-store-down-time = "30m" -leader-schedule-limit = 4 -region-schedule-limit = 2048 -replica-schedule-limit = 64 -merge-schedule-limit = 8 -hot-region-schedule-limit = 4 -## There are some policies supported: ["count", "size"], default: "count" -# leader-schedule-policy = "count" -## When the score difference between the leader or Region of the two stores is -## less than specified multiple times of the Region size, it is considered in balance by PD. -## If it equals 0.0, PD will automatically adjust it. -# tolerant-size-ratio = 0.0 - -## This three parameters control the merge scheduler behavior. -## If it is true, it means a region can only be merged into the next region of it. -# enable-one-way-merge = false -## If it is true, it means two region within different tables can be merged. -## This option only works when key type is "table". -# enable-cross-table-merge = false - -## customized schedulers, the format is as below -## if empty, it will use balance-leader, balance-region, hot-region as default -# [[schedule.schedulers]] -# type = "evict-leader" -# args = ["1"] - -[replication] -## The number of replicas for each region. -max-replicas = 3 -## The label keys specified the location of a store. -## The placement priorities is implied by the order of label keys. -## For example, ["zone", "rack"] means that we should place replicas to -## different zones first, then to different racks if we don't have enough zones. -location-labels = [] -## Strictly checks if the label of TiKV is matched with location labels. -# strictly-match-label = false - -[label-property] -## Do not assign region leaders to stores that have these tags. 
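The [replication] settings above only do something useful if the TiKV stores report labels with matching keys; a combined sketch (the label values are made up for illustration):

    # PD side
    [replication]
    max-replicas = 3
    location-labels = ["zone", "rack"]

    # TiKV side, in the [server] section of the TiKV config
    [server]
    labels = { zone = "us-west-1", rack = "r1" }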
-# [[label-property.reject-leader]] -# key = "zone" -# value = "cn1 \ No newline at end of file diff --git a/docs/api-references/docs.md b/docs/api-references/docs.md index a53aedc052..197ac81a45 100644 --- a/docs/api-references/docs.md +++ b/docs/api-references/docs.md @@ -3172,7 +3172,7 @@ CrdKind -tidb_cacert_path
+tidb-cacert-path
string @@ -3183,7 +3183,7 @@ string -tidb_cert_path
+tidb-cert-path
string @@ -3194,7 +3194,18 @@ string -tidb_key_path
+tidb-key-path
+ +string + + + +(Optional) + + + + +public-path-prefix
string diff --git a/pkg/apis/pingcap/v1alpha1/pd_config.go b/pkg/apis/pingcap/v1alpha1/pd_config.go index 55a346dcd4..eb55898454 100644 --- a/pkg/apis/pingcap/v1alpha1/pd_config.go +++ b/pkg/apis/pingcap/v1alpha1/pd_config.go @@ -126,11 +126,13 @@ type PDConfig struct { // DashboardConfig is the configuration for tidb-dashboard. type DashboardConfig struct { // +optional - TiDBCAPath *string `toml:"tidb-cacert-path,omitempty" json:"tidb_cacert_path,omitempty"` + TiDBCAPath *string `toml:"tidb-cacert-path,omitempty" json:"tidb-cacert-path,omitempty"` // +optional - TiDBCertPath *string `toml:"tidb-cert-path,omitempty" json:"tidb_cert_path,omitempty"` + TiDBCertPath *string `toml:"tidb-cert-path,omitempty" json:"tidb-cert-path,omitempty"` // +optional - TiDBKeyPath *string `toml:"tidb-key-path,omitempty" json:"tidb_key_path,omitempty"` + TiDBKeyPath *string `toml:"tidb-key-path,omitempty" json:"tidb-key-path,omitempty"` + // +optional + PublicPathPrefix *string `toml:"public-path-prefix,omitempty" json:"public-path-prefix,omitempty"` } // PDLogConfig serializes log related config in toml/json. diff --git a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go index 24d14afe2b..e75f8dab81 100644 --- a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go @@ -767,6 +767,11 @@ func (in *DashboardConfig) DeepCopyInto(out *DashboardConfig) { *out = new(string) **out = **in } + if in.PublicPathPrefix != nil { + in, out := &in.PublicPathPrefix, &out.PublicPathPrefix + *out = new(string) + **out = **in + } return }
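With the struct tags aligned to kebab-case and the new PublicPathPrefix field, the dashboard settings serialize to TOML (and to JSON in a TidbCluster spec) under the same key names. A rough sketch of the resulting PD config section, assuming PD's [dashboard] table name and with made-up paths:

    [dashboard]
    tidb-cacert-path = "/var/lib/dashboard-tls/ca.crt"
    tidb-cert-path = "/var/lib/dashboard-tls/tls.crt"
    tidb-key-path = "/var/lib/dashboard-tls/tls.key"
    # serve TiDB Dashboard under a non-root URL prefix, e.g. behind a reverse proxy
    public-path-prefix = "/dashboard"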