ddl: pre-split region for partitioned table #10221

Merged · 8 commits · Apr 30, 2019
16 changes: 14 additions & 2 deletions ddl/ddl_api.go
@@ -1202,14 +1202,26 @@ func (d *ddl) CreateTable(ctx sessionctx.Context, s *ast.CreateTableStmt) (err e
 
 	err = d.doDDLJob(ctx, job)
 	if err == nil {
+		var preSplitAndScatter func()
 		// do pre-split and scatter.
 		if tbInfo.ShardRowIDBits > 0 && tbInfo.PreSplitRegions > 0 {
+			preSplitAndScatter = func() { preSplitTableRegion(d.store, tbInfo, ctx.GetSessionVars().WaitTableSplitFinish) }
+		} else if atomic.LoadUint32(&EnableSplitTableRegion) != 0 {
+			pi := tbInfo.GetPartitionInfo()
+			if pi != nil {
+				preSplitAndScatter = func() { splitPartitionTableRegion(d.store, pi) }
+			} else {
+				preSplitAndScatter = func() { splitTableRegion(d.store, tbInfo.ID) }
+			}
+		}
+		if preSplitAndScatter != nil {
 			if ctx.GetSessionVars().WaitTableSplitFinish {
-				preSplitTableRegion(d.store, tbInfo, true)
+				preSplitAndScatter()
 			} else {
-				go preSplitTableRegion(d.store, tbInfo, false)
+				go preSplitAndScatter()
 			}
 		}
+
 		if tbInfo.AutoIncID > 1 {
 			// Default tableAutoIncID base is 0.
 			// If the first ID is expected to greater than 1, we need to do rebase.
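For context on the change above: the explicit `SHARD_ROW_ID_BITS`/`PRE_SPLIT_REGIONS` table options still take the existing `preSplitTableRegion` path, while the new `else if` branch covers tables created without those options when `EnableSplitTableRegion` is on, splitting partitioned tables once per partition and plain tables once at the table prefix. Whether the chosen closure runs synchronously or in a goroutine is decided once, by the session's `WaitTableSplitFinish` setting. A minimal testkit-style sketch of statements that would exercise each branch (not part of this diff; the table names are illustrative and the option syntax follows TiDB's documented CREATE TABLE options):

	// Assumes a *testkit.TestKit bound to the test store, as in ddl/table_split_test.go,
	// and ddl.EnableSplitTableRegion set to 1.
	tk.MustExec("use test")

	// Branch 1: explicit sharding options -> preSplitTableRegion.
	tk.MustExec("create table t_shard (a bigint) shard_row_id_bits = 4 pre_split_regions = 2")

	// Branch 2: partitioned table -> splitPartitionTableRegion, one split per partition ID.
	tk.MustExec(`create table t_part (a int key) partition by range(a) (
		partition p0 values less than (10),
		partition p1 values less than (20))`)

	// Branch 3: ordinary table -> splitTableRegion, a single split at the table prefix.
	tk.MustExec("create table t_plain (a int)")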
12 changes: 7 additions & 5 deletions ddl/table.go
@@ -17,7 +17,6 @@ import (
 	"fmt"
 	"strconv"
 	"strings"
-	"sync/atomic"
 
 	"github.com/pingcap/errors"
 	"github.com/pingcap/failpoint"
@@ -74,10 +73,6 @@ func onCreateTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error)
 	if err != nil {
 		return ver, errors.Trace(err)
 	}
-	if atomic.LoadUint32(&EnableSplitTableRegion) != 0 {
-		// TODO: Add restrictions to this operation.
-		go splitTableRegion(d.store, tbInfo.ID)
-	}
 	// Finish this job.
 	job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tbInfo)
 	asyncNotifyEvent(d, &util.Event{Tp: model.ActionCreateTable, TableInfo: tbInfo})
@@ -340,6 +335,13 @@ type splitableStore interface {
 	WaitScatterRegionFinish(regionID uint64) error
 }
 
+func splitPartitionTableRegion(store kv.Storage, pi *model.PartitionInfo) {
+	// Max partition count is 4096, should we sample and just choose some of the partition to split?
+	for _, def := range pi.Definitions {
+		splitTableRegion(store, def.ID)
+	}
+}
+
 func splitTableRegion(store kv.Storage, tableID int64) {
 	s, ok := store.(splitableStore)
 	if !ok {
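The new `splitPartitionTableRegion` iterates over the partition definitions because every partition owns a distinct physical table ID, and its row keys are encoded under that ID rather than under the logical table's ID; splitting only at `tbInfo.ID` would leave all partitions sharing one region. A small sketch of the key computation involved, assuming the `model`, `kv`, and `tablecodec` packages as imported elsewhere in this tree (the helper itself is illustrative and not part of the diff):

	// partitionSplitKeys returns one region split key per physical table ID,
	// mirroring what splitPartitionTableRegion/splitTableRegion split at.
	func partitionSplitKeys(tbInfo *model.TableInfo) []kv.Key {
		pi := tbInfo.GetPartitionInfo()
		if pi == nil {
			// Non-partitioned table: a single split at the table prefix t{tableID}.
			return []kv.Key{tablecodec.EncodeTablePrefix(tbInfo.ID)}
		}
		keys := make([]kv.Key, 0, len(pi.Definitions))
		for _, def := range pi.Definitions {
			// Each partition's rows live under its own prefix t{def.ID}_r...,
			// so splitting at every partition prefix gives each partition its own region.
			keys = append(keys, tablecodec.EncodeTablePrefix(def.ID))
		}
		return keys
	}

The in-code question about the 4096-partition limit is worth noting: splitting every partition eagerly can be expensive for very wide partitioning schemes, which is one reason the call site in ddl_api.go can run the whole closure in a goroutine instead of blocking CREATE TABLE.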
28 changes: 24 additions & 4 deletions ddl/table_split_test.go
@@ -26,6 +26,7 @@ import (
 	"github.com/pingcap/tidb/store/mockstore"
 	"github.com/pingcap/tidb/store/tikv"
 	"github.com/pingcap/tidb/tablecodec"
+	"github.com/pingcap/tidb/util/testkit"
 )
 
 type testDDLTableSplitSuite struct{}
@@ -41,20 +42,39 @@ func (s *testDDLTableSplitSuite) TestTableSplit(c *C) {
 	atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
 	dom, err := session.BootstrapSession(store)
 	c.Assert(err, IsNil)
+	tk := testkit.NewTestKit(c, store)
+	tk.MustExec("use test")
+	tk.MustExec(`create table t_part (a int key) partition by range(a) (
+		partition p0 values less than (10),
+		partition p1 values less than (20)
+	)`)
 	defer dom.Close()
 	atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
 	infoSchema := dom.InfoSchema()
 	c.Assert(infoSchema, NotNil)
 	t, err := infoSchema.TableByName(model.NewCIStr("mysql"), model.NewCIStr("tidb"))
 	c.Assert(err, IsNil)
-	regionStartKey := tablecodec.EncodeTablePrefix(t.Meta().ID)
+	checkRegionStartWithTableID(c, t.Meta().ID, store.(kvStore))
 
-	type kvStore interface {
-		GetRegionCache() *tikv.RegionCache
+	t, err = infoSchema.TableByName(model.NewCIStr("test"), model.NewCIStr("t_part"))
+	c.Assert(err, IsNil)
+	pi := t.Meta().GetPartitionInfo()
+	c.Assert(pi, NotNil)
+	for _, def := range pi.Definitions {
+		checkRegionStartWithTableID(c, def.ID, store.(kvStore))
 	}
+}
+
+type kvStore interface {
+	GetRegionCache() *tikv.RegionCache
+}
+
+func checkRegionStartWithTableID(c *C, id int64, store kvStore) {
+	regionStartKey := tablecodec.EncodeTablePrefix(id)
 	var loc *tikv.KeyLocation
+	var err error
 	for i := 0; i < 10; i++ {
-		cache := store.(kvStore).GetRegionCache()
+		cache := store.GetRegionCache()
 		loc, err = cache.LocateKey(tikv.NewBackoffer(context.Background(), 5000), regionStartKey)
 		c.Assert(err, IsNil)
 
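The tail of `checkRegionStartWithTableID` is cut off above, but the visible part shows the pattern: locate the region that contains the encoded table or partition prefix and retry a bounded number of times, because the split issued by the DDL path may still be in flight. A hedged sketch of that waiting pattern, using only the `kvStore` interface declared in the diff and the same `tikv` calls it already uses (the helper name, the sleep, and the start-key comparison are assumptions, not the hidden remainder of the test; it additionally needs the standard bytes, context, and time packages):

	// waitRegionStartsAt polls the region cache until the region containing `prefix`
	// actually begins at `prefix`, i.e. the pre-split has taken effect.
	func waitRegionStartsAt(store kvStore, prefix kv.Key) (*tikv.KeyLocation, error) {
		var loc *tikv.KeyLocation
		var err error
		for i := 0; i < 10; i++ {
			bo := tikv.NewBackoffer(context.Background(), 5000)
			loc, err = store.GetRegionCache().LocateKey(bo, prefix)
			if err != nil {
				return nil, err
			}
			if bytes.Equal(loc.StartKey, prefix) {
				return loc, nil // the prefix now owns a region boundary of its own
			}
			time.Sleep(100 * time.Millisecond) // the asynchronous split may not have landed yet
		}
		return loc, nil
	}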