cherry pick pingcap#15409 to release-3.1
Signed-off-by: sre-bot <sre-bot@pingcap.com>
AilinKid committed Apr 26, 2020
1 parent 489b077 commit f562443
Showing 10 changed files with 273 additions and 19 deletions.
105 changes: 105 additions & 0 deletions ddl/db_integration_test.go
@@ -16,6 +16,7 @@ package ddl_test
import (
"context"
"fmt"
"strconv"
"strings"
"sync/atomic"
"time"
@@ -1935,3 +1936,107 @@ func (s *testIntegrationSuite3) TestForeignKeyOnUpdateOnDelete(c *C) {
tk.MustExec("create table t5 (a int, b int, foreign key (b) references t (a) on update restrict)")
tk.MustExec("create table t6 (a int, b int, foreign key (b) references t (a) on update restrict on delete restrict)")
}

// TestCreateTableWithAutoIdCache tests the auto_id_cache table option.
// `auto_id_cache` takes effect on the handle too when `PKIsHandle` is false,
// or even when there is no auto_increment column at all.
func (s *testIntegrationSuite3) TestCreateTableWithAutoIdCache(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("USE test;")
tk.MustExec("drop table if exists t;")
tk.MustExec("drop table if exists t1;")

// Test primary key is handle.
tk.MustExec("create table t(a int auto_increment key) auto_id_cache 100")
tblInfo, err := s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
c.Assert(tblInfo.Meta().AutoIdCache, Equals, int64(100))
tk.MustExec("insert into t values()")
tk.MustQuery("select * from t").Check(testkit.Rows("1"))
tk.MustExec("delete from t")

// Invalidate the allocator cache; the next insert triggers a new cache.
tk.MustExec("rename table t to t1;")
tk.MustExec("insert into t1 values()")
tk.MustQuery("select * from t1").Check(testkit.Rows("101"))

// Test primary key is not handle.
tk.MustExec("drop table if exists t;")
tk.MustExec("drop table if exists t1;")
tk.MustExec("create table t(a int) auto_id_cache 100")
tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)

tk.MustExec("insert into t values()")
tk.MustQuery("select _tidb_rowid from t").Check(testkit.Rows("1"))
tk.MustExec("delete from t")

// Invalidate the allocator cache; the next insert triggers a new cache.
tk.MustExec("rename table t to t1;")
tk.MustExec("insert into t1 values()")
tk.MustQuery("select _tidb_rowid from t1").Check(testkit.Rows("101"))

// Test both auto_increment and rowid exist.
tk.MustExec("drop table if exists t;")
tk.MustExec("drop table if exists t1;")
tk.MustExec("create table t(a int null, b int auto_increment unique) auto_id_cache 100")
tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)

tk.MustExec("insert into t(b) values(NULL)")
tk.MustQuery("select b, _tidb_rowid from t").Check(testkit.Rows("1 2"))
tk.MustExec("delete from t")

// Invalidate the allocator cache; the next insert triggers a new cache.
tk.MustExec("rename table t to t1;")
tk.MustExec("insert into t1(b) values(NULL)")
tk.MustQuery("select b, _tidb_rowid from t1").Check(testkit.Rows("101 102"))
tk.MustExec("delete from t1")

// Test alter auto_id_cache.
tk.MustExec("alter table t1 auto_id_cache 200")
tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t1"))
c.Assert(err, IsNil)
c.Assert(tblInfo.Meta().AutoIdCache, Equals, int64(200))

tk.MustExec("insert into t1(b) values(NULL)")
tk.MustQuery("select b, _tidb_rowid from t1").Check(testkit.Rows("201 202"))
tk.MustExec("delete from t1")

// Invalidate the allocator cache; the next insert triggers a new cache.
tk.MustExec("rename table t1 to t;")
tk.MustExec("insert into t(b) values(NULL)")
tk.MustQuery("select b, _tidb_rowid from t").Check(testkit.Rows("401 402"))
tk.MustExec("delete from t")

tk.MustExec("drop table if exists t;")
tk.MustExec("drop table if exists t1;")
tk.MustExec("create table t(a int auto_increment key) auto_id_cache 3")
tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
c.Assert(tblInfo.Meta().AutoIdCache, Equals, int64(3))

// Test an insert batch size (4 here) greater than the customized autoid step (3 here).
tk.MustExec("insert into t(a) values(NULL),(NULL),(NULL),(NULL)")
tk.MustQuery("select a from t").Check(testkit.Rows("1", "2", "3", "4"))
tk.MustExec("delete from t")

// Invalidate the allocator cache; the next insert triggers a new cache.
tk.MustExec("rename table t to t1;")
tk.MustExec("insert into t1(a) values(NULL)")
next := tk.MustQuery("select a from t1").Rows()[0][0].(string)
nextInt, err := strconv.Atoi(next)
c.Assert(err, IsNil)
c.Assert(nextInt, Greater, 5)

// Test auto_id_cache overflows int64.
tk.MustExec("drop table if exists t;")
_, err = tk.Exec("create table t(a int) auto_id_cache = 9223372036854775808")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "table option auto_id_cache overflows int64")

tk.MustExec("create table t(a int) auto_id_cache = 9223372036854775807")
_, err = tk.Exec("alter table t auto_id_cache = 9223372036854775808")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "table option auto_id_cache overflows int64")
}
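
The ids asserted above (1 on the first insert, 101 right after the rename) follow from how a cached allocator reserves ranges: each refill takes the next `auto_id_cache` ids from the persisted base, and a rename throws the unused part of the cached range away. Below is a minimal toy model of that behavior — invented types, not TiDB's allocator — just to make the arithmetic concrete.

```go
package main

import "fmt"

// toyAlloc mimics a cached id allocator: whenever the local cache is empty or
// has been invalidated, it reserves the next `cache` ids from a global base.
type toyAlloc struct {
	globalBase int64 // persisted high-water mark
	base, end  int64 // locally cached range (base, end]
	cache      int64 // the auto_id_cache value
}

func (a *toyAlloc) next() int64 {
	if a.base == a.end { // local cache exhausted: reserve a new range
		a.base = a.globalBase
		a.end = a.globalBase + a.cache
		a.globalBase = a.end
	}
	a.base++
	return a.base
}

func main() {
	a := &toyAlloc{cache: 100}
	fmt.Println(a.next()) // 1 — matches `select * from t` above

	// Rename: a fresh allocator starts from the persisted base; the unused
	// rest of (1, 100] is lost.
	a.base, a.end = a.globalBase, a.globalBase
	fmt.Println(a.next()) // 101 — matches the expectation after `rename table`
}
```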
33 changes: 33 additions & 0 deletions ddl/ddl_api.go
@@ -1897,6 +1897,12 @@ func handleTableOptions(options []*ast.TableOption, tbInfo *model.TableInfo) err
switch op.Tp {
case ast.TableOptionAutoIncrement:
tbInfo.AutoIncID = int64(op.UintValue)
case ast.TableOptionAutoIdCache:
if op.UintValue > uint64(math.MaxInt64) {
// TODO: Refine this error.
return errors.New("table option auto_id_cache overflows int64")
}
tbInfo.AutoIdCache = int64(op.UintValue)
case ast.TableOptionAutoRandomBase:
tbInfo.AutoRandID = int64(op.UintValue)
case ast.TableOptionComment:
@@ -2083,6 +2089,12 @@ func (d *ddl) AlterTable(ctx sessionctx.Context, ident ast.Ident, specs []*ast.A
err = d.ShardRowID(ctx, ident, opt.UintValue)
case ast.TableOptionAutoIncrement:
err = d.RebaseAutoID(ctx, ident, int64(opt.UintValue), autoid.RowIDAllocType)
case ast.TableOptionAutoIdCache:
if opt.UintValue > uint64(math.MaxInt64) {
// TODO: Refine this error.
return errors.New("table option auto_id_cache overflows int64")
}
err = d.AlterTableAutoIDCache(ctx, ident, int64(opt.UintValue))
case ast.TableOptionAutoRandomBase:
err = d.RebaseAutoID(ctx, ident, int64(opt.UintValue), autoid.AutoRandomType)
case ast.TableOptionComment:
@@ -3041,6 +3053,27 @@ func (d *ddl) AlterTableComment(ctx sessionctx.Context, ident ast.Ident, spec *a
return errors.Trace(err)
}

// AlterTableAutoIDCache updates the table's auto_id_cache option.
func (d *ddl) AlterTableAutoIDCache(ctx sessionctx.Context, ident ast.Ident, newCache int64) error {
schema, tb, err := d.getSchemaAndTableByIdent(ctx, ident)
if err != nil {
return errors.Trace(err)
}

job := &model.Job{
SchemaID: schema.ID,
TableID: tb.Meta().ID,
SchemaName: schema.Name.L,
Type: model.ActionModifyTableAutoIdCache,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{newCache},
}

err = d.doDDLJob(ctx, job)
err = d.callHookOnChanged(err)
return errors.Trace(err)
}

// AlterTableCharsetAndCollate changes the table charset and collate.
func (d *ddl) AlterTableCharsetAndCollate(ctx sessionctx.Context, ident ast.Ident, toCharset, toCollate string) error {
// use the last one.
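Both the CREATE TABLE path (`handleTableOptions`) and the ALTER TABLE path above reject option values above `math.MaxInt64`, because the parser hands the option over as a `uint64` while `TableInfo.AutoIdCache` is an `int64`. A standalone sketch of that guard — the helper name is invented for illustration — shows the boundary:

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

// autoIDCacheFromOption converts the parser's unsigned option value into the
// signed field stored in the table info, rejecting values that would overflow.
func autoIDCacheFromOption(v uint64) (int64, error) {
	if v > uint64(math.MaxInt64) {
		return 0, errors.New("table option auto_id_cache overflows int64")
	}
	return int64(v), nil
}

func main() {
	fmt.Println(autoIDCacheFromOption(math.MaxInt64))     // 9223372036854775807 <nil>
	fmt.Println(autoIDCacheFromOption(math.MaxInt64 + 1)) // 0 table option auto_id_cache overflows int64
}
```

This is the same boundary the test exercises with 9223372036854775807 (accepted) and 9223372036854775808 (rejected).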
2 changes: 2 additions & 0 deletions ddl/ddl_worker.go
@@ -613,6 +613,8 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64,
ver, err = w.onShardRowID(d, t, job)
case model.ActionModifyTableComment:
ver, err = onModifyTableComment(t, job)
case model.ActionModifyTableAutoIdCache:
ver, err = onModifyTableAutoIDCache(t, job)
case model.ActionAddTablePartition:
ver, err = onAddTablePartition(d, t, job)
case model.ActionModifyTableCharsetAndCollate:
2 changes: 1 addition & 1 deletion ddl/rollingback.go
@@ -295,7 +295,7 @@ func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job)
model.ActionModifyColumn, model.ActionAddForeignKey,
model.ActionDropForeignKey, model.ActionRenameTable,
model.ActionModifyTableCharsetAndCollate, model.ActionTruncateTablePartition,
model.ActionModifySchemaCharsetAndCollate:
model.ActionModifySchemaCharsetAndCollate, model.ActionModifyTableAutoIdCache:
ver, err = cancelOnlyNotHandledJob(job)
default:
job.State = model.JobStateCancelled
21 changes: 21 additions & 0 deletions ddl/table.go
@@ -508,6 +508,27 @@ func onRebaseAutoID(store kv.Storage, t *meta.Meta, job *model.Job, tp autoid.Al
return ver, nil
}

func onModifyTableAutoIDCache(t *meta.Meta, job *model.Job) (int64, error) {
var cache int64
if err := job.DecodeArgs(&cache); err != nil {
job.State = model.JobStateCancelled
return 0, errors.Trace(err)
}

tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)
if err != nil {
return 0, errors.Trace(err)
}

tblInfo.AutoIdCache = cache
ver, err := updateVersionAndTableInfo(t, job, tblInfo, true)
if err != nil {
return ver, errors.Trace(err)
}
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
return ver, nil
}

func (w *worker) onShardRowID(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
var shardRowIDBits uint64
err := job.DecodeArgs(&shardRowIDBits)
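The new cache value travels from `AlterTableAutoIDCache` (which packs it into `job.Args`) to `onModifyTableAutoIDCache` (which calls `job.DecodeArgs(&cache)`), and the args are serialized while the job sits in the DDL queue — hence the pointer-based decode on the worker side. The standalone model below loosely mirrors that round trip with `encoding/json`; it is a sketch, not the real `model.Job` implementation.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// toyJob keeps only the pieces needed to show the Args round trip.
type toyJob struct {
	Args    []interface{}   // filled in by the DDL API side
	RawArgs json.RawMessage // what is actually persisted with the queued job
}

func (j *toyJob) encode() error {
	b, err := json.Marshal(j.Args)
	j.RawArgs = b
	return err
}

// decodeArgs unmarshals the raw args back into caller-supplied pointers,
// roughly like model.Job.DecodeArgs.
func (j *toyJob) decodeArgs(out ...interface{}) error {
	return json.Unmarshal(j.RawArgs, &out)
}

func main() {
	// DDL API side (AlterTableAutoIDCache): pack the new cache size.
	job := &toyJob{Args: []interface{}{int64(200)}}
	if err := job.encode(); err != nil {
		panic(err)
	}

	// Worker side (onModifyTableAutoIDCache): unpack it into a typed variable.
	var cache int64
	if err := job.decodeArgs(&cache); err != nil {
		panic(err)
	}
	fmt.Println(cache) // 200
}
```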
4 changes: 4 additions & 0 deletions executor/show.go
@@ -878,6 +878,10 @@ func ConstructResultOfShowCreateTable(ctx sessionctx.Context, tableInfo *model.T
}
}

if tableInfo.AutoIdCache != 0 {
fmt.Fprintf(buf, " /*T![auto_id_cache] AUTO_ID_CACHE=%d */", tableInfo.AutoIdCache)
}

if tableInfo.AutoRandID != 0 {
fmt.Fprintf(buf, " /*T![auto_rand_base] AUTO_RANDOM_BASE=%d */", tableInfo.AutoRandID)
}
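The clause is emitted inside TiDB's feature-gated comment syntax: a TiDB server that knows the `auto_id_cache` feature ID executes the content of `/*T![auto_id_cache] ... */`, while MySQL and older TiDB versions treat it as a plain comment, so the `SHOW CREATE TABLE` output stays importable elsewhere. A small sketch of the emission logic (helper name invented) is below; a zero `AutoIdCache` means the option was never set and produces no clause, matching the guard above.

```go
package main

import (
	"bytes"
	"fmt"
)

// appendAutoIDCacheClause appends the version-gated AUTO_ID_CACHE clause when
// the option is set; zero means the table option was never specified.
func appendAutoIDCacheClause(buf *bytes.Buffer, autoIDCache int64) {
	if autoIDCache != 0 {
		fmt.Fprintf(buf, " /*T![auto_id_cache] AUTO_ID_CACHE=%d */", autoIDCache)
	}
}

func main() {
	var buf bytes.Buffer
	buf.WriteString(") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")
	appendAutoIDCacheClause(&buf, 10)
	fmt.Println(buf.String())
	// ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_id_cache] AUTO_ID_CACHE=10 */
}
```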
36 changes: 36 additions & 0 deletions executor/show_test.go
@@ -729,3 +729,39 @@ func (s *testAutoRandomSuite) TestShowCreateTableAutoRandom(c *C) {
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_rand_base] AUTO_RANDOM_BASE=200 */",
))
}

// TestAutoIdCache reuses testAutoRandomSuite to test the auto_id_cache table option.
func (s *testAutoRandomSuite) TestAutoIdCache(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")

tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int auto_increment key) auto_id_cache = 10")
tk.MustQuery("show create table t").Check(testutil.RowsWithSep("|",
""+
"t CREATE TABLE `t` (\n"+
" `a` int(11) NOT NULL AUTO_INCREMENT,\n"+
" PRIMARY KEY (`a`)\n"+
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_id_cache] AUTO_ID_CACHE=10 */",
))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int auto_increment unique, b int key) auto_id_cache 100")
tk.MustQuery("show create table t").Check(testutil.RowsWithSep("|",
""+
"t CREATE TABLE `t` (\n"+
" `a` int(11) NOT NULL AUTO_INCREMENT,\n"+
" `b` int(11) NOT NULL,\n"+
" PRIMARY KEY (`b`),\n"+
" UNIQUE KEY `a` (`a`)\n"+
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_id_cache] AUTO_ID_CACHE=100 */",
))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int key) auto_id_cache 5")
tk.MustQuery("show create table t").Check(testutil.RowsWithSep("|",
""+
"t CREATE TABLE `t` (\n"+
" `a` int(11) NOT NULL,\n"+
" PRIMARY KEY (`a`)\n"+
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_id_cache] AUTO_ID_CACHE=5 */",
))
}
67 changes: 50 additions & 17 deletions meta/autoid/autoid.go
@@ -60,6 +60,20 @@
AutoRandomType
)

// CustomAutoIncCacheOption is an AllocOption that customizes the allocator step length.
type CustomAutoIncCacheOption int64

// ApplyOn implements the AllocOption interface.
func (step CustomAutoIncCacheOption) ApplyOn(alloc *allocator) {
alloc.step = int64(step)
alloc.customStep = true
}

// AllocOption is an interface for custom allocator options that may be added in the future.
type AllocOption interface {
ApplyOn(*allocator)
}

// Allocator is an auto increment id generator.
// Just keep id unique actually.
type Allocator interface {
@@ -103,6 +117,7 @@ type allocator struct {
isUnsigned bool
lastAllocTime time.Time
step int64
customStep bool
allocType AllocatorType
}
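
(Aside, not part of the diff.) CustomAutoIncCacheOption is a standard functional option: NewAllocator builds the allocator with its defaults and then lets each option mutate it, which is how the customized step and the customStep flag get set. A self-contained toy version of the pattern, with a made-up default step, looks like this:

```go
package main

import "fmt"

type allocator struct {
	step       int64
	customStep bool
}

// AllocOption mirrors the interface added in this patch.
type AllocOption interface {
	ApplyOn(*allocator)
}

// CustomAutoIncCacheOption overrides the default step and marks it as custom.
type CustomAutoIncCacheOption int64

func (step CustomAutoIncCacheOption) ApplyOn(alloc *allocator) {
	alloc.step = int64(step)
	alloc.customStep = true
}

// NewAllocator applies the options after the defaults are filled in.
func NewAllocator(opts ...AllocOption) *allocator {
	alloc := &allocator{step: 30000} // illustrative default; the real default lives in autoid.go
	for _, opt := range opts {
		opt.ApplyOn(alloc)
	}
	return alloc
}

func main() {
	d := NewAllocator()
	fmt.Println(d.step, d.customStep) // 30000 false

	c := NewAllocator(CustomAutoIncCacheOption(100))
	fmt.Println(c.step, c.customStep) // 100 true
}
```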

@@ -277,22 +292,30 @@ func NextStep(curStep int64, consumeDur time.Duration) int64 {
}

// NewAllocator returns a new auto increment id generator on the store.
- func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool, allocType AllocatorType) Allocator {
- return &allocator{
+ func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool, allocType AllocatorType, opts ...AllocOption) Allocator {
+ alloc := &allocator{
store: store,
dbID: dbID,
isUnsigned: isUnsigned,
step: step,
lastAllocTime: time.Now(),
allocType: allocType,
}
+ for _, fn := range opts {
+ fn.ApplyOn(alloc)
+ }
+ return alloc
}

// NewAllocatorsFromTblInfo creates an array of allocators of different types with the information of model.TableInfo.
func NewAllocatorsFromTblInfo(store kv.Storage, schemaID int64, tblInfo *model.TableInfo) Allocators {
var allocs []Allocator
dbID := tblInfo.GetDBID(schemaID)
- allocs = append(allocs, NewAllocator(store, dbID, tblInfo.IsAutoIncColUnsigned(), RowIDAllocType))
+ if tblInfo.AutoIdCache > 0 {
+ allocs = append(allocs, NewAllocator(store, dbID, tblInfo.IsAutoIncColUnsigned(), RowIDAllocType, CustomAutoIncCacheOption(tblInfo.AutoIdCache)))
+ } else {
+ allocs = append(allocs, NewAllocator(store, dbID, tblInfo.IsAutoIncColUnsigned(), RowIDAllocType))
+ }
if tblInfo.ContainsAutoRandomBits() {
allocs = append(allocs, NewAllocator(store, dbID, tblInfo.IsAutoRandomBitColUnsigned(), AutoRandomType))
}
@@ -401,13 +424,18 @@ func (alloc *allocator) alloc4Signed(tableID int64, n uint64, increment, offset
if alloc.base+n1 > alloc.end {
var newBase, newEnd int64
startTime := time.Now()
- // Although it may skip a segment here, we still think it is consumed.
- consumeDur := startTime.Sub(alloc.lastAllocTime)
- nextStep := NextStep(alloc.step, consumeDur)
- // Make sure nextStep is big enough.
+ nextStep := alloc.step
+ if !alloc.customStep {
+ // Although it may skip a segment here, we still think it is consumed.
+ consumeDur := startTime.Sub(alloc.lastAllocTime)
+ nextStep = NextStep(alloc.step, consumeDur)
+ }
+ // Although the step is customized by user, we still need to make sure nextStep is big enough for insert batch.
if nextStep <= n1 {
- alloc.step = mathutil.MinInt64(n1*2, maxStep)
- } else {
- alloc.step = nextStep
+ nextStep = mathutil.MinInt64(n1*2, maxStep)
+ }
+ // Store the step for non-customized-step allocator to calculate next dynamic step.
+ if !alloc.customStep {
+ alloc.step = nextStep
}
err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error {
@@ -417,7 +445,7 @@
if err1 != nil {
return err1
}
- tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, alloc.step)
+ tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, nextStep)
// The global rest is not enough for alloc.
if tmpStep < n1 {
return ErrAutoincReadFailed
@@ -463,13 +491,18 @@ func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64, increment, offse
if uint64(alloc.base)+uint64(n1) > uint64(alloc.end) {
var newBase, newEnd int64
startTime := time.Now()
- // Although it may skip a segment here, we still treat it as consumed.
- consumeDur := startTime.Sub(alloc.lastAllocTime)
- nextStep := NextStep(alloc.step, consumeDur)
- // Make sure nextStep is big enough.
+ nextStep := alloc.step
+ if !alloc.customStep {
+ // Although it may skip a segment here, we still treat it as consumed.
+ consumeDur := startTime.Sub(alloc.lastAllocTime)
+ nextStep = NextStep(alloc.step, consumeDur)
+ }
+ // Although the step is customized by user, we still need to make sure nextStep is big enough for insert batch.
if nextStep <= n1 {
- alloc.step = mathutil.MinInt64(n1*2, maxStep)
- } else {
- alloc.step = nextStep
+ nextStep = mathutil.MinInt64(n1*2, maxStep)
+ }
+ // Store the step for non-customized-step allocator to calculate next dynamic step.
+ if !alloc.customStep {
+ alloc.step = nextStep
}
err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error {
@@ -479,7 +512,7 @@
if err1 != nil {
return err1
}
- tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(alloc.step)))
+ tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(nextStep)))
// The global rest is not enough for alloc.
if tmpStep < n1 {
return ErrAutoincReadFailed
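For the test above with `auto_id_cache 3` and a 4-row insert: the customized step is kept in `alloc.step`, but the range reserved for that single allocation is widened to `min(2*n1, maxStep)` because the batch does not fit, which is why the first id after the rename is greater than 5. The toy calculation below walks through that arithmetic; it is not the real allocator, and `NextStep`'s time-based growth is replaced by a stand-in.

```go
package main

import "fmt"

const maxStep = 2000000 // illustrative cap; see the maxStep constant in autoid.go

func min64(a, b int64) int64 {
	if a < b {
		return a
	}
	return b
}

// reserve returns the size of the range fetched from the global base for a
// batch of n1 ids, plus the step the allocator remembers afterwards.
func reserve(step int64, customStep bool, n1 int64) (rangeSize, rememberedStep int64) {
	nextStep := step
	if !customStep {
		nextStep = step * 2 // stand-in for NextStep's time-based growth
	}
	if nextStep <= n1 {
		nextStep = min64(n1*2, maxStep) // widen just this reservation to fit the batch
	}
	if !customStep {
		step = nextStep // only dynamic allocators remember the grown step
	}
	return nextStep, step
}

func main() {
	// auto_id_cache 3, insert of 4 rows: the reserved range is widened to 8,
	// but the remembered step stays 3.
	rangeSize, step := reserve(3, true, 4)
	fmt.Println(rangeSize, step) // 8 3

	// Ids 1..4 come out of the range (0, 8]. A rename throws the rest away, so
	// the next insert starts a fresh range at 9 — consistent with the test's
	// assertion that the next id is greater than 5.
}
```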