diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index 9f258dbe165bb..72793151ab32f 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -16,6 +16,7 @@ package ddl_test import ( "context" "fmt" + "strconv" "strings" "sync/atomic" "time" @@ -1935,3 +1936,107 @@ func (s *testIntegrationSuite3) TestForeignKeyOnUpdateOnDelete(c *C) { tk.MustExec("create table t5 (a int, b int, foreign key (b) references t (a) on update restrict)") tk.MustExec("create table t6 (a int, b int, foreign key (b) references t (a) on update restrict on delete restrict)") } + +// TestCreateTableWithAutoIdCache tests the auto_id_cache table option. +// `auto_id_cache` takes effect on the handle too when `PKIsHandle` is false, +// or even when there is no auto_increment column at all. +func (s *testIntegrationSuite3) TestCreateTableWithAutoIdCache(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("USE test;") + tk.MustExec("drop table if exists t;") + tk.MustExec("drop table if exists t1;") + + // Test primary key is handle. + tk.MustExec("create table t(a int auto_increment key) auto_id_cache 100") + tblInfo, err := s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + c.Assert(tblInfo.Meta().AutoIdCache, Equals, int64(100)) + tk.MustExec("insert into t values()") + tk.MustQuery("select * from t").Check(testkit.Rows("1")) + tk.MustExec("delete from t") + + // Invalidate the allocator cache, insert will trigger a new cache + tk.MustExec("rename table t to t1;") + tk.MustExec("insert into t1 values()") + tk.MustQuery("select * from t1").Check(testkit.Rows("101")) + + // Test primary key is not handle. 
+ tk.MustExec("drop table if exists t;") + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t(a int) auto_id_cache 100") + tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + + tk.MustExec("insert into t values()") + tk.MustQuery("select _tidb_rowid from t").Check(testkit.Rows("1")) + tk.MustExec("delete from t") + + // Invalid the allocator cache, insert will trigger a new cache + tk.MustExec("rename table t to t1;") + tk.MustExec("insert into t1 values()") + tk.MustQuery("select _tidb_rowid from t1").Check(testkit.Rows("101")) + + // Test both auto_increment and rowid exist. + tk.MustExec("drop table if exists t;") + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t(a int null, b int auto_increment unique) auto_id_cache 100") + tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + + tk.MustExec("insert into t(b) values(NULL)") + tk.MustQuery("select b, _tidb_rowid from t").Check(testkit.Rows("1 2")) + tk.MustExec("delete from t") + + // Invalid the allocator cache, insert will trigger a new cache. + tk.MustExec("rename table t to t1;") + tk.MustExec("insert into t1(b) values(NULL)") + tk.MustQuery("select b, _tidb_rowid from t1").Check(testkit.Rows("101 102")) + tk.MustExec("delete from t1") + + // Test alter auto_id_cache. + tk.MustExec("alter table t1 auto_id_cache 200") + tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t1")) + c.Assert(err, IsNil) + c.Assert(tblInfo.Meta().AutoIdCache, Equals, int64(200)) + + tk.MustExec("insert into t1(b) values(NULL)") + tk.MustQuery("select b, _tidb_rowid from t1").Check(testkit.Rows("201 202")) + tk.MustExec("delete from t1") + + // Invalid the allocator cache, insert will trigger a new cache. 
+ tk.MustExec("rename table t1 to t;") + tk.MustExec("insert into t(b) values(NULL)") + tk.MustQuery("select b, _tidb_rowid from t").Check(testkit.Rows("401 402")) + tk.MustExec("delete from t") + + tk.MustExec("drop table if exists t;") + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t(a int auto_increment key) auto_id_cache 3") + tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + c.Assert(tblInfo.Meta().AutoIdCache, Equals, int64(3)) + + // Test insert batch size(4 here) greater than the customized autoid step(3 here). + tk.MustExec("insert into t(a) values(NULL),(NULL),(NULL),(NULL)") + tk.MustQuery("select a from t").Check(testkit.Rows("1", "2", "3", "4")) + tk.MustExec("delete from t") + + // Invalid the allocator cache, insert will trigger a new cache. + tk.MustExec("rename table t to t1;") + tk.MustExec("insert into t1(a) values(NULL)") + next := tk.MustQuery("select a from t1").Rows()[0][0].(string) + nextInt, err := strconv.Atoi(next) + c.Assert(err, IsNil) + c.Assert(nextInt, Greater, 5) + + // Test auto_id_cache overflows int64. 
+ tk.MustExec("drop table if exists t;") + _, err = tk.Exec("create table t(a int) auto_id_cache = 9223372036854775808") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "table option auto_id_cache overflows int64") + + tk.MustExec("create table t(a int) auto_id_cache = 9223372036854775807") + _, err = tk.Exec("alter table t auto_id_cache = 9223372036854775808") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "table option auto_id_cache overflows int64") +} diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 69e21b979fa6e..dbeea838b2eb2 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -1897,6 +1897,12 @@ func handleTableOptions(options []*ast.TableOption, tbInfo *model.TableInfo) err switch op.Tp { case ast.TableOptionAutoIncrement: tbInfo.AutoIncID = int64(op.UintValue) + case ast.TableOptionAutoIdCache: + if op.UintValue > uint64(math.MaxInt64) { + // TODO: Refine this error. + return errors.New("table option auto_id_cache overflows int64") + } + tbInfo.AutoIdCache = int64(op.UintValue) case ast.TableOptionAutoRandomBase: tbInfo.AutoRandID = int64(op.UintValue) case ast.TableOptionComment: @@ -2083,6 +2089,12 @@ func (d *ddl) AlterTable(ctx sessionctx.Context, ident ast.Ident, specs []*ast.A err = d.ShardRowID(ctx, ident, opt.UintValue) case ast.TableOptionAutoIncrement: err = d.RebaseAutoID(ctx, ident, int64(opt.UintValue), autoid.RowIDAllocType) + case ast.TableOptionAutoIdCache: + if opt.UintValue > uint64(math.MaxInt64) { + // TODO: Refine this error. + return errors.New("table option auto_id_cache overflows int64") + } + err = d.AlterTableAutoIDCache(ctx, ident, int64(opt.UintValue)) case ast.TableOptionAutoRandomBase: err = d.RebaseAutoID(ctx, ident, int64(opt.UintValue), autoid.AutoRandomType) case ast.TableOptionComment: @@ -3041,6 +3053,27 @@ func (d *ddl) AlterTableComment(ctx sessionctx.Context, ident ast.Ident, spec *a return errors.Trace(err) } +// AlterTableAutoIDCache updates the table auto_id_cache option. 
+func (d *ddl) AlterTableAutoIDCache(ctx sessionctx.Context, ident ast.Ident, newCache int64) error { + schema, tb, err := d.getSchemaAndTableByIdent(ctx, ident) + if err != nil { + return errors.Trace(err) + } + + job := &model.Job{ + SchemaID: schema.ID, + TableID: tb.Meta().ID, + SchemaName: schema.Name.L, + Type: model.ActionModifyTableAutoIdCache, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{newCache}, + } + + err = d.doDDLJob(ctx, job) + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + // AlterTableCharset changes the table charset and collate. func (d *ddl) AlterTableCharsetAndCollate(ctx sessionctx.Context, ident ast.Ident, toCharset, toCollate string) error { // use the last one. diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index 88372f6569505..66064dac98886 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -613,6 +613,8 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, ver, err = w.onShardRowID(d, t, job) case model.ActionModifyTableComment: ver, err = onModifyTableComment(t, job) + case model.ActionModifyTableAutoIdCache: + ver, err = onModifyTableAutoIDCache(t, job) case model.ActionAddTablePartition: ver, err = onAddTablePartition(d, t, job) case model.ActionModifyTableCharsetAndCollate: diff --git a/ddl/rollingback.go b/ddl/rollingback.go index 21040b72b3c44..0515ecb0eca4f 100644 --- a/ddl/rollingback.go +++ b/ddl/rollingback.go @@ -295,7 +295,7 @@ func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) model.ActionModifyColumn, model.ActionAddForeignKey, model.ActionDropForeignKey, model.ActionRenameTable, model.ActionModifyTableCharsetAndCollate, model.ActionTruncateTablePartition, - model.ActionModifySchemaCharsetAndCollate: + model.ActionModifySchemaCharsetAndCollate, model.ActionModifyTableAutoIdCache: ver, err = cancelOnlyNotHandledJob(job) default: job.State = model.JobStateCancelled diff --git a/ddl/table.go b/ddl/table.go index 
cf7500ec6f3c2..669d047fc2d03 100644 --- a/ddl/table.go +++ b/ddl/table.go @@ -508,6 +508,27 @@ func onRebaseAutoID(store kv.Storage, t *meta.Meta, job *model.Job, tp autoid.Al return ver, nil } +func onModifyTableAutoIDCache(t *meta.Meta, job *model.Job) (int64, error) { + var cache int64 + if err := job.DecodeArgs(&cache); err != nil { + job.State = model.JobStateCancelled + return 0, errors.Trace(err) + } + + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + if err != nil { + return 0, errors.Trace(err) + } + + tblInfo.AutoIdCache = cache + ver, err := updateVersionAndTableInfo(t, job, tblInfo, true) + if err != nil { + return ver, errors.Trace(err) + } + job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) + return ver, nil +} + func (w *worker) onShardRowID(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { var shardRowIDBits uint64 err := job.DecodeArgs(&shardRowIDBits) diff --git a/executor/show.go b/executor/show.go index f18a5a0bbe79f..9aab4595ca724 100644 --- a/executor/show.go +++ b/executor/show.go @@ -878,6 +878,10 @@ func ConstructResultOfShowCreateTable(ctx sessionctx.Context, tableInfo *model.T } } + if tableInfo.AutoIdCache != 0 { + fmt.Fprintf(buf, " /*T![auto_id_cache] AUTO_ID_CACHE=%d */", tableInfo.AutoIdCache) + } + if tableInfo.AutoRandID != 0 { fmt.Fprintf(buf, " /*T![auto_rand_base] AUTO_RANDOM_BASE=%d */", tableInfo.AutoRandID) } diff --git a/executor/show_test.go b/executor/show_test.go index 08967ba3c76bc..ec7a1ee4f326f 100644 --- a/executor/show_test.go +++ b/executor/show_test.go @@ -729,3 +729,39 @@ func (s *testAutoRandomSuite) TestShowCreateTableAutoRandom(c *C) { ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_rand_base] AUTO_RANDOM_BASE=200 */", )) } + +// Override testAutoRandomSuite to test auto id cache. 
+func (s *testAutoRandomSuite) TestAutoIdCache(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int auto_increment key) auto_id_cache = 10") + tk.MustQuery("show create table t").Check(testutil.RowsWithSep("|", + ""+ + "t CREATE TABLE `t` (\n"+ + " `a` int(11) NOT NULL AUTO_INCREMENT,\n"+ + " PRIMARY KEY (`a`)\n"+ + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_id_cache] AUTO_ID_CACHE=10 */", + )) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int auto_increment unique, b int key) auto_id_cache 100") + tk.MustQuery("show create table t").Check(testutil.RowsWithSep("|", + ""+ + "t CREATE TABLE `t` (\n"+ + " `a` int(11) NOT NULL AUTO_INCREMENT,\n"+ + " `b` int(11) NOT NULL,\n"+ + " PRIMARY KEY (`b`),\n"+ + " UNIQUE KEY `a` (`a`)\n"+ + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_id_cache] AUTO_ID_CACHE=100 */", + )) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int key) auto_id_cache 5") + tk.MustQuery("show create table t").Check(testutil.RowsWithSep("|", + ""+ + "t CREATE TABLE `t` (\n"+ + " `a` int(11) NOT NULL,\n"+ + " PRIMARY KEY (`a`)\n"+ + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_id_cache] AUTO_ID_CACHE=5 */", + )) +} diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index 52580b7d98161..27b48eaec51f5 100755 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -60,6 +60,20 @@ const ( AutoRandomType ) +// CustomAutoIncCacheOption is one kind of AllocOption to customize the allocator step length. +type CustomAutoIncCacheOption int64 + +// ApplyOn implements the AllocOption interface. +func (step CustomAutoIncCacheOption) ApplyOn(alloc *allocator) { + alloc.step = int64(step) + alloc.customStep = true +} + +// AllocOption is an interface to define allocator custom options coming in the future. 
+type AllocOption interface { + ApplyOn(*allocator) +} + // Allocator is an auto increment id generator. // Just keep id unique actually. type Allocator interface { @@ -103,6 +117,7 @@ type allocator struct { isUnsigned bool lastAllocTime time.Time step int64 + customStep bool allocType AllocatorType } @@ -277,8 +292,8 @@ func NextStep(curStep int64, consumeDur time.Duration) int64 { } // NewAllocator returns a new auto increment id generator on the store. -func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool, allocType AllocatorType) Allocator { - return &allocator{ +func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool, allocType AllocatorType, opts ...AllocOption) Allocator { + alloc := &allocator{ store: store, dbID: dbID, isUnsigned: isUnsigned, @@ -286,13 +301,21 @@ func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool, allocType Alloc lastAllocTime: time.Now(), allocType: allocType, } + for _, fn := range opts { + fn.ApplyOn(alloc) + } + return alloc } // NewAllocatorsFromTblInfo creates an array of allocators of different types with the information of model.TableInfo. 
func NewAllocatorsFromTblInfo(store kv.Storage, schemaID int64, tblInfo *model.TableInfo) Allocators { var allocs []Allocator dbID := tblInfo.GetDBID(schemaID) - allocs = append(allocs, NewAllocator(store, dbID, tblInfo.IsAutoIncColUnsigned(), RowIDAllocType)) + if tblInfo.AutoIdCache > 0 { + allocs = append(allocs, NewAllocator(store, dbID, tblInfo.IsAutoIncColUnsigned(), RowIDAllocType, CustomAutoIncCacheOption(tblInfo.AutoIdCache))) + } else { + allocs = append(allocs, NewAllocator(store, dbID, tblInfo.IsAutoIncColUnsigned(), RowIDAllocType)) + } if tblInfo.ContainsAutoRandomBits() { allocs = append(allocs, NewAllocator(store, dbID, tblInfo.IsAutoRandomBitColUnsigned(), AutoRandomType)) } @@ -401,13 +424,18 @@ func (alloc *allocator) alloc4Signed(tableID int64, n uint64, increment, offset if alloc.base+n1 > alloc.end { var newBase, newEnd int64 startTime := time.Now() - // Although it may skip a segment here, we still think it is consumed. - consumeDur := startTime.Sub(alloc.lastAllocTime) - nextStep := NextStep(alloc.step, consumeDur) - // Make sure nextStep is big enough. + nextStep := alloc.step + if !alloc.customStep { + // Although it may skip a segment here, we still think it is consumed. + consumeDur := startTime.Sub(alloc.lastAllocTime) + nextStep = NextStep(alloc.step, consumeDur) + } + // Although the step is customized by user, we still need to make sure nextStep is big enough for insert batch. if nextStep <= n1 { - alloc.step = mathutil.MinInt64(n1*2, maxStep) - } else { + nextStep = mathutil.MinInt64(n1*2, maxStep) + } + // Store the step for non-customized-step allocator to calculate next dynamic step. 
+ if !alloc.customStep { alloc.step = nextStep } err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { @@ -417,7 +445,7 @@ func (alloc *allocator) alloc4Signed(tableID int64, n uint64, increment, offset if err1 != nil { return err1 } - tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, alloc.step) + tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, nextStep) // The global rest is not enough for alloc. if tmpStep < n1 { return ErrAutoincReadFailed @@ -463,13 +491,18 @@ func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64, increment, offse if uint64(alloc.base)+uint64(n1) > uint64(alloc.end) { var newBase, newEnd int64 startTime := time.Now() - // Although it may skip a segment here, we still treat it as consumed. - consumeDur := startTime.Sub(alloc.lastAllocTime) - nextStep := NextStep(alloc.step, consumeDur) - // Make sure nextStep is big enough. + nextStep := alloc.step + if !alloc.customStep { + // Although it may skip a segment here, we still treat it as consumed. + consumeDur := startTime.Sub(alloc.lastAllocTime) + nextStep = NextStep(alloc.step, consumeDur) + } + // Although the step is customized by user, we still need to make sure nextStep is big enough for insert batch. if nextStep <= n1 { - alloc.step = mathutil.MinInt64(n1*2, maxStep) - } else { + nextStep = mathutil.MinInt64(n1*2, maxStep) + } + // Store the step for non-customized-step allocator to calculate next dynamic step. + if !alloc.customStep { alloc.step = nextStep } err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { @@ -479,7 +512,7 @@ func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64, increment, offse if err1 != nil { return err1 } - tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(alloc.step))) + tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(nextStep))) // The global rest is not enough for alloc. 
if tmpStep < n1 { return ErrAutoincReadFailed diff --git a/sessionctx/binloginfo/binloginfo_test.go b/sessionctx/binloginfo/binloginfo_test.go index b72f610af297d..e63a22e81213c 100644 --- a/sessionctx/binloginfo/binloginfo_test.go +++ b/sessionctx/binloginfo/binloginfo_test.go @@ -507,6 +507,26 @@ func (s *testBinlogSuite) TestAddSpecialComment(c *C) { "create table t1 (id int) /*T![auto_id_cache] auto_id_cache=5 */ ;", "create table t1 (id int) /*T![auto_id_cache] auto_id_cache=5 */ ;", }, + { + "create table t1 (id int auto_increment key) auto_id_cache 100;", + "create table t1 (id int auto_increment key) /*T![auto_id_cache] auto_id_cache 100 */ ;", + }, + { + "create table t1 (id int auto_increment unique) auto_id_cache 10;", + "create table t1 (id int auto_increment unique) /*T![auto_id_cache] auto_id_cache 10 */ ;", + }, + { + "create table t1 (id int) auto_id_cache = 5;", + "create table t1 (id int) /*T![auto_id_cache] auto_id_cache = 5 */ ;", + }, + { + "create table t1 (id int) auto_id_cache=5;", + "create table t1 (id int) /*T![auto_id_cache] auto_id_cache=5 */ ;", + }, + { + "create table t1 (id int) /*T![auto_id_cache] auto_id_cache=5 */ ;", + "create table t1 (id int) /*T![auto_id_cache] auto_id_cache=5 */ ;", + }, } for _, ca := range testCase { re := binloginfo.AddSpecialComment(ca.input) diff --git a/util/admin/admin.go b/util/admin/admin.go index 2ea4125427312..e24e57a3bfc5c 100644 --- a/util/admin/admin.go +++ b/util/admin/admin.go @@ -107,7 +107,7 @@ func IsJobRollbackable(job *model.Job) bool { model.ActionTruncateTable, model.ActionAddForeignKey, model.ActionDropForeignKey, model.ActionRenameTable, model.ActionModifyTableCharsetAndCollate, model.ActionTruncateTablePartition, - model.ActionModifySchemaCharsetAndCollate: + model.ActionModifySchemaCharsetAndCollate, model.ActionModifyTableAutoIdCache: return job.SchemaState == model.StateNone } return true