Squashed commit of the following:
commit 51853c7
Author: Yiding Cui <winoros@gmail.com>
Date:   Fri Oct 28 11:25:16 2022 +0800

    fix

commit bdf9854
Merge: 27324cb fb6a131
Author: Yiding Cui <winoros@gmail.com>
Date:   Fri Oct 28 11:08:16 2022 +0800

    Merge branch 'master' into part-table-topn

commit 27324cb
Author: Yiding Cui <winoros@gmail.com>
Date:   Sun Aug 28 22:55:37 2022 +0800

    fix test

commit 174254c
Merge: 4b00f0a a8f524b
Author: Yiding Cui <winoros@gmail.com>
Date:   Sun Aug 28 22:24:10 2022 +0800

    Merge branch 'master' into part-table-topn

commit 4b00f0a
Merge: 027eca6 5006949
Author: Yiding Cui <winoros@gmail.com>
Date:   Sun Aug 28 15:10:27 2022 +0800

    Merge remote-tracking branch 'yiding/part-table-topn' into part-table-topn

commit 027eca6
Author: Yiding Cui <winoros@gmail.com>
Date:   Sun Aug 28 15:09:58 2022 +0800

    fix tests

commit 5006949
Merge: 0575b4c 2858bc1
Author: Yiding Cui <winoros@gmail.com>
Date:   Fri Aug 26 15:32:26 2022 +0800

    Merge branch 'master' into part-table-topn

commit 0575b4c
Author: Yiding Cui <winoros@gmail.com>
Date:   Fri Aug 26 15:06:33 2022 +0800

    fix lint

commit af0cf54
Merge: 2bc49ff b99aebe
Author: Yiding Cui <winoros@gmail.com>
Date:   Fri Aug 26 14:48:26 2022 +0800

    Merge branch 'master' into part-table-topn

commit 2bc49ff
Author: Yiding Cui <winoros@gmail.com>
Date:   Wed Jul 20 17:30:06 2022 +0800

    fix make check

commit f940c71
Author: Yiding Cui <winoros@gmail.com>
Date:   Wed Jul 20 17:23:51 2022 +0800

    make sure that the request is sent first by partition then by region

commit 423f599
Author: Yiding Cui <winoros@gmail.com>
Date:   Wed Jul 20 01:40:45 2022 +0800

    add tests

commit 6ccdc30
Merge: 9f57b9c 39bca97
Author: Yiding Cui <winoros@gmail.com>
Date:   Tue Jul 19 19:53:20 2022 +0800

    Merge remote-tracking branch 'yiding/part-table-topn' into part-table-topn

commit 9f57b9c
Author: Yiding Cui <winoros@gmail.com>
Date:   Tue Jul 19 01:08:00 2022 +0800

    address comments

commit 896a4e0
Merge: 2c08dde 4cade24
Author: Yiding Cui <winoros@gmail.com>
Date:   Mon Jul 18 17:47:49 2022 +0800

    Merge branch 'master' into part-table-topn

commit 39bca97
Merge: 2c08dde a33d971
Author: Weizhen Wang <wangweizhen@pingcap.com>
Date:   Sat Jul 16 11:27:13 2022 +0800

    Merge branch 'master' into part-table-topn

commit 2c08dde
Author: Yiding Cui <winoros@gmail.com>
Date:   Wed Jul 13 20:30:23 2022 +0800

    add tests

commit 3106166
Author: Yiding Cui <winoros@gmail.com>
Date:   Mon Jul 11 23:13:22 2022 +0800

    fix fmt

commit 3993e00
Author: Yiding Cui <winoros@gmail.com>
Date:   Mon Jul 11 23:12:00 2022 +0800

    planner: support push part of order property to partition table

commit 7c9e327
Author: Yiding Cui <winoros@gmail.com>
Date:   Thu Jul 7 17:37:59 2022 +0800

    planner: support push part of order property down to the partition table
winoros committed Oct 28, 2022
1 parent ecd6753 commit c56d702
Showing 17 changed files with 409 additions and 61 deletions.
2 changes: 1 addition & 1 deletion ddl/db_partition_test.go
@@ -1402,7 +1402,7 @@ func TestAlterTableDropPartitionByList(t *testing.T) {
);`)
tk.MustExec(`insert into t values (1),(3),(5),(null)`)
tk.MustExec(`alter table t drop partition p1`)
tk.MustQuery("select * from t").Sort().Check(testkit.Rows("1", "5", "<nil>"))
tk.MustQuery("select * from t order by id").Check(testkit.Rows("<nil>", "1", "5"))
ctx := tk.Session()
is := domain.GetDomain(ctx).InfoSchema()
tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
16 changes: 16 additions & 0 deletions distsql/request_builder.go
@@ -187,6 +187,12 @@ func (builder *RequestBuilder) SetKeyRanges(keyRanges []kv.KeyRange) *RequestBuilder {
return builder
}

// SetPartitionKeyRanges sets the "KeyRangesWithPartition" for "kv.Request".
func (builder *RequestBuilder) SetPartitionKeyRanges(keyRanges [][]kv.KeyRange) *RequestBuilder {
builder.Request.KeyRangesWithPartition = keyRanges
return builder
}

// SetStartTS sets "StartTS" for "kv.Request".
func (builder *RequestBuilder) SetStartTS(startTS uint64) *RequestBuilder {
builder.Request.StartTs = startTS
@@ -326,6 +332,16 @@ func (builder *RequestBuilder) verifyTxnScope() error {
return errors.New("requestBuilder can't decode tableID from keyRange")
}
}
for _, partKeyRanges := range builder.Request.KeyRangesWithPartition {
for _, keyRange := range partKeyRanges {
tableID := tablecodec.DecodeTableID(keyRange.StartKey)
if tableID > 0 {
visitPhysicalTableID[tableID] = struct{}{}
} else {
return errors.New("requestBuilder can't decode tableID from keyRange")
}
}
}

for phyTableID := range visitPhysicalTableID {
valid := VerifyTxnScope(txnScope, phyTableID, builder.is)
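
The new setter keeps key ranges grouped by partition instead of flattening them into one slice. Below is a minimal, self-contained sketch (stand-in types only; the real `kv.KeyRange` and `kv.Request` carry many more fields) of that shape and of the partition-then-range walk the `verifyTxnScope` hunk adds:

```go
package main

import "fmt"

// Stand-ins for kv.KeyRange and kv.Request, reduced to what the hunk shows.
type KeyRange struct{ StartKey, EndKey []byte }

type Request struct {
	KeyRanges              []KeyRange   // flat, region-major ranges
	KeyRangesWithPartition [][]KeyRange // one inner slice per partition
}

type RequestBuilder struct{ Request Request }

// Mirrors the new SetPartitionKeyRanges: it only stores the slices,
// preserving the per-partition grouping for the request sender.
func (b *RequestBuilder) SetPartitionKeyRanges(keyRanges [][]KeyRange) *RequestBuilder {
	b.Request.KeyRangesWithPartition = keyRanges
	return b
}

func main() {
	b := new(RequestBuilder)
	b.SetPartitionKeyRanges([][]KeyRange{
		{{StartKey: []byte("t61_a"), EndKey: []byte("t61_z")}}, // partition 1
		{{StartKey: []byte("t62_a"), EndKey: []byte("t62_z")}}, // partition 2
	})
	// Same nested loop as the verifyTxnScope addition: visit every range
	// of every partition.
	for i, part := range b.Request.KeyRangesWithPartition {
		for _, r := range part {
			fmt.Printf("partition %d: %q..%q\n", i, r.StartKey, r.EndKey)
		}
	}
}
```
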
9 changes: 4 additions & 5 deletions executor/builder.go
@@ -4195,17 +4195,16 @@ func (h kvRangeBuilderFromRangeAndPartition) buildKeyRangeSeparately(ranges []*ranger.Range) ([]int64, [][]kv.KeyRange, error) {
return pids, ret, nil
}

func (h kvRangeBuilderFromRangeAndPartition) buildKeyRange(ranges []*ranger.Range) ([]kv.KeyRange, error) {
//nolint: prealloc
var ret []kv.KeyRange
for _, p := range h.partitions {
func (h kvRangeBuilderFromRangeAndPartition) buildKeyRange(ranges []*ranger.Range) ([][]kv.KeyRange, error) {
ret := make([][]kv.KeyRange, len(h.partitions))
for i, p := range h.partitions {
pid := p.GetPhysicalID()
meta := p.Meta()
kvRange, err := distsql.TableHandleRangesToKVRanges(h.sctx.GetSessionVars().StmtCtx, []int64{pid}, meta != nil && meta.IsCommonHandle, ranges, nil)
if err != nil {
return nil, err
}
ret = append(ret, kvRange...)
ret[i] = append(ret[i], kvRange...)
}
return ret, nil
}
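
The signature change is the substance of this hunk: `buildKeyRange` now returns one inner slice per partition, with `ret[i]` aligned to `h.partitions[i]` so that even an empty partition keeps its slot. Any caller that still needs a flat list has to concatenate; a hedged sketch with a placeholder `KeyRange` type:

```go
package main

import "fmt"

type KeyRange struct{ StartKey, EndKey string }

// flatten concatenates per-partition ranges back into one slice, the shape
// the old buildKeyRange used to return.
func flatten(perPartition [][]KeyRange) []KeyRange {
	var flat []KeyRange
	for _, kr := range perPartition {
		flat = append(flat, kr...)
	}
	return flat
}

func main() {
	perPart := [][]KeyRange{
		{{"p0_start", "p0_end"}},
		{}, // an empty partition keeps its slot in the outer slice
		{{"p2_start", "p2_end"}},
	}
	fmt.Println(len(flatten(perPart))) // 2
}
```
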
3 changes: 0 additions & 3 deletions executor/distsql.go
@@ -457,9 +457,6 @@ func (e *IndexLookUpExecutor) Open(ctx context.Context) error {
func (e *IndexLookUpExecutor) buildTableKeyRanges() (err error) {
sc := e.ctx.GetSessionVars().StmtCtx
if e.partitionTableMode {
if e.keepOrder { // this case should be prevented by the optimizer
return errors.New("invalid execution plan: cannot keep order when accessing a partition table by IndexLookUpReader")
}
e.feedback.Invalidate() // feedback for partition tables is not ready
e.partitionKVRanges = make([][]kv.KeyRange, 0, len(e.prunedPartitions))
for _, p := range e.prunedPartitions {
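
The deleted guard is what previously forced a sort above IndexLookUp on partitioned tables. With requests grouped per partition, each partition can return its rows in index order, and a TopN above the reader combines the per-partition prefixes (the test plans below still keep a TopN, i.e. the property is only partially pushed). A much-simplified sketch of merging two ordered partition streams, not the executor's actual logic:

```go
package main

import "fmt"

// mergeOrdered merges two ascending streams, the way ordered rows from
// two partitions can be combined without a full re-sort.
func mergeOrdered(a, b []int) []int {
	out := make([]int, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		if a[i] <= b[j] {
			out = append(out, a[i])
			i++
		} else {
			out = append(out, b[j])
			j++
		}
	}
	out = append(out, a[i:]...)
	return append(out, b[j:]...)
}

func main() {
	p0 := []int{1, 4, 7} // ordered rows from partition p0
	p1 := []int{2, 3, 9} // ordered rows from partition p1
	fmt.Println(mergeOrdered(p0, p1)) // [1 2 3 4 7 9]
}
```
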
143 changes: 141 additions & 2 deletions executor/partition_table_test.go
@@ -84,7 +84,7 @@ partition p2 values less than (10))`)
// Table reader: one partition
tk.MustQuery("select * from pt where c > 8").Check(testkit.Rows("9 9"))
// Table reader: more than one partition
tk.MustQuery("select * from pt where c < 2 or c >= 9").Check(testkit.Rows("0 0", "9 9"))
tk.MustQuery("select * from pt where c < 2 or c >= 9").Sort().Check(testkit.Rows("0 0", "9 9"))

// Index reader
tk.MustQuery("select c from pt").Sort().Check(testkit.Rows("0", "2", "4", "6", "7", "9", "<nil>"))
@@ -96,7 +96,7 @@ partition p2 values less than (10))`)
tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt").Sort().Check(testkit.Rows("0 0", "2 2", "4 4", "6 6", "7 7", "9 9", "<nil> <nil>"))
tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt where id < 4 and c > 10").Check(testkit.Rows())
tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c > 8").Check(testkit.Rows("9 9"))
tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c < 2 or c >= 9").Check(testkit.Rows("0 0", "9 9"))
tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c < 2 or c >= 9").Sort().Check(testkit.Rows("0 0", "9 9"))

// Index Merge
tk.MustExec("set @@tidb_enable_index_merge = 1")
@@ -377,14 +377,67 @@ func TestOrderByandLimit(t *testing.T) {
// regular table
tk.MustExec("create table tregular(a int, b int, index idx_a(a))")

// range partition table with int pk
tk.MustExec(`create table trange_intpk(a int primary key, b int) partition by range(a) (
partition p0 values less than(300),
partition p1 values less than (500),
partition p2 values less than(1100));`)

// hash partition table with int pk
tk.MustExec("create table thash_intpk(a int primary key, b int) partition by hash(a) partitions 4;")

// regular table with int pk
tk.MustExec("create table tregular_intpk(a int primary key, b int)")

// range partition table with clustered index
tk.MustExec(`create table trange_clustered(a int, b int, primary key(a, b) clustered) partition by range(a) (
partition p0 values less than(300),
partition p1 values less than (500),
partition p2 values less than(1100));`)

// hash partition table with clustered index
tk.MustExec("create table thash_clustered(a int, b int, primary key(a, b) clustered) partition by hash(a) partitions 4;")

// regular table with clustered index
tk.MustExec("create table tregular_clustered(a int, b int, primary key(a, b) clustered)")

// generate some random data to be inserted
vals := make([]string, 0, 2000)
for i := 0; i < 2000; i++ {
vals = append(vals, fmt.Sprintf("(%v, %v)", rand.Intn(1100), rand.Intn(2000)))
}

dedupValsA := make([]string, 0, 2000)
dedupMapA := make(map[int]struct{}, 2000)
for i := 0; i < 2000; i++ {
valA := rand.Intn(1100)
if _, ok := dedupMapA[valA]; ok {
continue
}
dedupValsA = append(dedupValsA, fmt.Sprintf("(%v, %v)", valA, rand.Intn(2000)))
dedupMapA[valA] = struct{}{}
}

dedupValsAB := make([]string, 0, 2000)
dedupMapAB := make(map[string]struct{}, 2000)
for i := 0; i < 2000; i++ {
val := fmt.Sprintf("(%v, %v)", rand.Intn(1100), rand.Intn(2000))
if _, ok := dedupMapAB[val]; ok {
continue
}
dedupValsAB = append(dedupValsAB, val)
dedupMapAB[val] = struct{}{}
}

tk.MustExec("insert into trange values " + strings.Join(vals, ","))
tk.MustExec("insert into thash values " + strings.Join(vals, ","))
tk.MustExec("insert into tregular values " + strings.Join(vals, ","))
tk.MustExec("insert into trange_intpk values " + strings.Join(dedupValsA, ","))
tk.MustExec("insert into thash_intpk values " + strings.Join(dedupValsA, ","))
tk.MustExec("insert into tregular_intpk values " + strings.Join(dedupValsA, ","))
tk.MustExec("insert into trange_clustered values " + strings.Join(dedupValsAB, ","))
tk.MustExec("insert into thash_clustered values " + strings.Join(dedupValsAB, ","))
tk.MustExec("insert into tregular_clustered values " + strings.Join(dedupValsAB, ","))

// test indexLookUp
for i := 0; i < 100; i++ {
@@ -398,6 +451,29 @@
tk.MustQuery(queryPartition).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows())
}

// test indexLookUp with order property pushed down.
for i := 0; i < 100; i++ {
// explain select * from t where a > {y} use index(idx_a) order by a limit {x}; // check if IndexLookUp is used
// select * from t where a > {y} use index(idx_a) order by a limit {x}; // it can return the correct result
x := rand.Intn(1099)
y := rand.Intn(2000) + 1
// Since we only order by a (not by a, b), rows that tie on a can come back in any order when we read both a and b.
// We therefore cap the range at the oracle's max value of a so that the compared result sets are stable.
maxEle := tk.MustQuery(fmt.Sprintf("select ifnull(max(a), 1100) from (select * from tregular use index(idx_a) where a > %v order by a limit %v) t", x, y)).Rows()[0][0]
queryRangePartitionWithLimitHint := fmt.Sprintf("select /*+ LIMIT_TO_COP() */ * from trange use index(idx_a) where a > %v and a < greatest(%v+1, %v) order by a limit %v", x, x+1, maxEle, y)
queryHashPartitionWithLimitHint := fmt.Sprintf("select /*+ LIMIT_TO_COP() */ * from thash use index(idx_a) where a > %v and a < greatest(%v+1, %v) order by a limit %v", x, x+1, maxEle, y)
queryRegular := fmt.Sprintf("select * from tregular use index(idx_a) where a > %v and a < greatest(%v+1, %v) order by a limit %v;", x, x+1, maxEle, y)
require.True(t, tk.HasPlan(queryRangePartitionWithLimitHint, "Limit"))
require.True(t, tk.HasPlan(queryRangePartitionWithLimitHint, "IndexLookUp"))
require.True(t, tk.HasPlan(queryHashPartitionWithLimitHint, "Limit"))
require.True(t, tk.HasPlan(queryHashPartitionWithLimitHint, "IndexLookUp"))
require.True(t, tk.HasPlan(queryRangePartitionWithLimitHint, "TopN")) // but not fully pushed
require.True(t, tk.HasPlan(queryHashPartitionWithLimitHint, "TopN"))
regularResult := tk.MustQuery(queryRegular).Sort().Rows()
tk.MustQuery(queryRangePartitionWithLimitHint).Sort().Check(regularResult)
tk.MustQuery(queryHashPartitionWithLimitHint).Sort().Check(regularResult)
}

// test tableReader
for i := 0; i < 100; i++ {
// explain select * from t where a > {y} ignore index(idx_a) order by a limit {x}; // check if TableReader is used
@@ -410,6 +486,51 @@
tk.MustQuery(queryPartition).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows())
}

// test tableReader with order property pushed down.
for i := 0; i < 100; i++ {
// explain select * from t where a > {y} ignore index(idx_a) order by a limit {x}; // check if TableReader is used
// select * from t where a > {y} ignore index(idx_a) order by a limit {x}; // it can return the correct result
x := rand.Intn(1099)
y := rand.Intn(2000) + 1
queryRangePartition := fmt.Sprintf("select /*+ LIMIT_TO_COP() */ * from trange ignore index(idx_a) where a > %v order by a, b limit %v;", x, y)
queryHashPartition := fmt.Sprintf("select /*+ LIMIT_TO_COP() */ * from thash ignore index(idx_a) where a > %v order by a, b limit %v;", x, y)
queryRegular := fmt.Sprintf("select * from tregular ignore index(idx_a) where a > %v order by a, b limit %v;", x, y)
require.True(t, tk.HasPlan(queryRangePartition, "TableReader")) // check if tableReader is used
require.True(t, tk.HasPlan(queryHashPartition, "TableReader"))
require.False(t, tk.HasPlan(queryRangePartition, "Limit")) // check if order property is not pushed
require.False(t, tk.HasPlan(queryHashPartition, "Limit"))
regularResult := tk.MustQuery(queryRegular).Sort().Rows()
tk.MustQuery(queryRangePartition).Sort().Check(regularResult)
tk.MustQuery(queryHashPartition).Sort().Check(regularResult)

// test int pk
// For simplicity, we only read column a.
queryRangePartition = fmt.Sprintf("select /*+ LIMIT_TO_COP() */ a from trange_intpk use index(primary) where a > %v order by a limit %v", x, y)
queryHashPartition = fmt.Sprintf("select /*+ LIMIT_TO_COP() */ a from thash_intpk use index(primary) where a > %v order by a limit %v", x, y)
queryRegular = fmt.Sprintf("select a from tregular_intpk where a > %v order by a limit %v", x, y)
require.True(t, tk.HasPlan(queryRangePartition, "TableReader"))
require.True(t, tk.HasPlan(queryHashPartition, "TableReader"))
require.True(t, tk.HasPlan(queryRangePartition, "Limit")) // check if order property is not pushed
require.True(t, tk.HasPlan(queryHashPartition, "Limit"))
regularResult = tk.MustQuery(queryRegular).Rows()
tk.MustQuery(queryRangePartition).Check(regularResult)
tk.MustQuery(queryHashPartition).Check(regularResult)

// test clustered index
queryRangePartition = fmt.Sprintf("select /*+ LIMIT_TO_COP() */ * from trange_clustered use index(primary) where a > %v order by a, b limit %v;", x, y)
queryHashPartition = fmt.Sprintf("select /*+ LIMIT_TO_COP() */ * from thash_clustered use index(primary) where a > %v order by a, b limit %v;", x, y)
queryRegular = fmt.Sprintf("select * from tregular_clustered where a > %v order by a, b limit %v;", x, y)
require.True(t, tk.HasPlan(queryRangePartition, "TableReader")) // check if tableReader is used
require.True(t, tk.HasPlan(queryHashPartition, "TableReader"))
require.True(t, tk.HasPlan(queryRangePartition, "Limit")) // check if order property is pushed
require.True(t, tk.HasPlan(queryHashPartition, "Limit"))
require.True(t, tk.HasPlan(queryRangePartition, "TopN")) // but not fully pushed
require.True(t, tk.HasPlan(queryHashPartition, "TopN"))
regularResult = tk.MustQuery(queryRegular).Rows()
tk.MustQuery(queryRangePartition).Check(regularResult)
tk.MustQuery(queryHashPartition).Check(regularResult)
}

// test indexReader
for i := 0; i < 100; i++ {
// explain select a from t where a > {y} use index(idx_a) order by a limit {x}; // check if IndexReader is used
@@ -422,6 +543,24 @@
tk.MustQuery(queryPartition).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows())
}

// test indexReader with order property pushed down.
for i := 0; i < 100; i++ {
// explain select a from t where a > {y} use index(idx_a) order by a limit {x}; // check if IndexReader is used
// select a from t where a > {y} use index(idx_a) order by a limit {x}; // it can return the correct result
x := rand.Intn(1099)
y := rand.Intn(2000) + 1
queryRangePartition := fmt.Sprintf("select /*+ LIMIT_TO_COP() */ a from trange use index(idx_a) where a > %v order by a limit %v;", x, y)
queryHashPartition := fmt.Sprintf("select /*+ LIMIT_TO_COP() */ a from trange use index(idx_a) where a > %v order by a limit %v;", x, y)
queryRegular := fmt.Sprintf("select a from tregular use index(idx_a) where a > %v order by a limit %v;", x, y)
require.True(t, tk.HasPlan(queryRangePartition, "IndexReader")) // check if indexReader is used
require.True(t, tk.HasPlan(queryHashPartition, "IndexReader"))
require.True(t, tk.HasPlan(queryRangePartition, "Limit")) // check if order property is pushed
require.True(t, tk.HasPlan(queryHashPartition, "Limit"))
regularResult := tk.MustQuery(queryRegular).Sort().Rows()
tk.MustQuery(queryRangePartition).Sort().Check(regularResult)
tk.MustQuery(queryHashPartition).Sort().Check(regularResult)
}

// test indexMerge
for i := 0; i < 100; i++ {
// explain select /*+ use_index_merge(t) */ * from t where a > 2 or b < 5 order by a limit {x}; // check if IndexMerge is used
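
All of the loops above follow one differential pattern: identical data goes into a partitioned table and a regular table, the regular table serves as the oracle, and the partitioned plan must return exactly the oracle's rows (sorted first when the query order is not total). A distilled, testkit-free sketch of that check (`sameRows` is a hypothetical helper, not part of the test file):

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

// sameRows compares two result sets order-insensitively, the way the
// tests compare .Sort().Rows() of the partitioned and regular queries.
func sameRows(got, want []string) bool {
	sort.Strings(got)
	sort.Strings(want)
	return reflect.DeepEqual(got, want)
}

func main() {
	oracle := []string{"1 10", "2 20", "3 30"}      // rows from tregular
	partitioned := []string{"3 30", "1 10", "2 20"} // rows from trange/thash
	fmt.Println(sameRows(partitioned, oracle))      // true
}
```
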
37 changes: 29 additions & 8 deletions executor/table_reader.go
@@ -57,7 +57,7 @@ func (sr selectResultHook) SelectResult(ctx context.Context, sctx sessionctx.Context,
}

type kvRangeBuilder interface {
buildKeyRange(ranges []*ranger.Range) ([]kv.KeyRange, error)
buildKeyRange(ranges []*ranger.Range) ([][]kv.KeyRange, error)
buildKeyRangeSeparately(ranges []*ranger.Range) ([]int64, [][]kv.KeyRange, error)
}

@@ -201,13 +201,25 @@ func (e *TableReaderExecutor) Open(ctx context.Context) error {
if err != nil {
return err
}
e.kvRanges = append(e.kvRanges, kvReq.KeyRanges...)
if len(kvReq.KeyRanges) > 0 {
e.kvRanges = append(e.kvRanges, kvReq.KeyRanges...)
} else {
for _, kr := range kvReq.KeyRangesWithPartition {
e.kvRanges = append(e.kvRanges, kr...)
}
}
if len(secondPartRanges) != 0 {
kvReq, err = e.buildKVReq(ctx, secondPartRanges)
if err != nil {
return err
}
e.kvRanges = append(e.kvRanges, kvReq.KeyRanges...)
if len(kvReq.KeyRanges) > 0 {
e.kvRanges = append(e.kvRanges, kvReq.KeyRanges...)
} else {
for _, kr := range kvReq.KeyRangesWithPartition {
e.kvRanges = append(e.kvRanges, kr...)
}
}
}
return nil
}
@@ -310,10 +322,19 @@ func (e *TableReaderExecutor) buildResp(ctx context.Context, ranges []*ranger.Range)
if err != nil {
return nil, err
}
slices.SortFunc(kvReq.KeyRanges, func(i, j kv.KeyRange) bool {
return bytes.Compare(i.StartKey, j.StartKey) < 0
})
e.kvRanges = append(e.kvRanges, kvReq.KeyRanges...)
if len(kvReq.KeyRanges) > 0 {
slices.SortFunc(kvReq.KeyRanges, func(i, j kv.KeyRange) bool {
return bytes.Compare(i.StartKey, j.StartKey) < 0
})
e.kvRanges = append(e.kvRanges, kvReq.KeyRanges...)
} else {
for _, kr := range kvReq.KeyRangesWithPartition {
slices.SortFunc(kr, func(i, j kv.KeyRange) bool {
return bytes.Compare(i.StartKey, j.StartKey) < 0
})
e.kvRanges = append(e.kvRanges, kr...)
}
}

result, err := e.SelectResult(ctx, e.ctx, kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.id)
if err != nil {
@@ -405,7 +426,7 @@ func (e *TableReaderExecutor) buildKVReq(ctx context.Context, ranges []*ranger.Range)
if err != nil {
return nil, err
}
reqBuilder = builder.SetKeyRanges(kvRange)
reqBuilder = builder.SetPartitionKeyRanges(kvRange)
} else {
reqBuilder = builder.SetHandleRanges(e.ctx.GetSessionVars().StmtCtx, getPhysicalTableID(e.table), e.table.Meta() != nil && e.table.Meta().IsCommonHandle, ranges, e.feedback)
}
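
Note that `buildResp` now sorts each partition's ranges independently instead of sorting one global slice, so ranges never migrate across partition boundaries and the partition-major order survives. A small sketch of that invariant (using `sort.Slice` from the standard library rather than the `slices.SortFunc` in the diff):

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

type KeyRange struct{ StartKey, EndKey []byte }

// sortWithinPartitions orders ranges by StartKey inside each partition
// only; the outer, per-partition order is left untouched.
func sortWithinPartitions(parts [][]KeyRange) {
	for _, kr := range parts {
		sort.Slice(kr, func(i, j int) bool {
			return bytes.Compare(kr[i].StartKey, kr[j].StartKey) < 0
		})
	}
}

func main() {
	parts := [][]KeyRange{
		{{StartKey: []byte("b")}, {StartKey: []byte("a")}}, // partition 0
		{{StartKey: []byte("d")}, {StartKey: []byte("c")}}, // partition 1
	}
	sortWithinPartitions(parts)
	for i, kr := range parts {
		for _, r := range kr {
			fmt.Printf("partition %d: %s\n", i, r.StartKey)
		}
	}
}
```
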
4 changes: 4 additions & 0 deletions kv/kv.go
@@ -339,6 +339,10 @@ type Request struct {
Data []byte
KeyRanges []KeyRange

// KeyRangesWithPartition makes sure that the request is sent first by partition then by region.
// When the table is small, it's possible that multiple partitions are in the same region.
KeyRangesWithPartition [][]KeyRange

// For PartitionTableScan used by tiflash.
PartitionIDAndRanges []PartitionIDAndRanges

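
For intuition on the new field's comment: when partitions are small, several of them can share a single region, so a region-major send order would interleave rows from different partitions and break a per-partition ordered merge. A toy illustration with hypothetical partition and region IDs:

```go
package main

import "fmt"

func main() {
	// Hypothetical layout: partitions p0 and p1 both have data in region r1.
	type piece struct{ partition, region string }
	pieces := []piece{{"p0", "r1"}, {"p1", "r1"}, {"p0", "r2"}}

	// Region-major order would visit p0, p1, p0; the partitions interleave.
	// Partition-major order (what KeyRangesWithPartition encodes) keeps
	// each partition contiguous: p0, p0, p1.
	for _, want := range []string{"p0", "p1"} {
		for _, pc := range pieces {
			if pc.partition == want {
				fmt.Println(pc.partition, pc.region)
			}
		}
	}
}
```
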
2 changes: 1 addition & 1 deletion planner/cascades/testdata/integration_suite_in.json
@@ -142,7 +142,7 @@
{
"name": "TestCascadePlannerHashedPartTable",
"cases": [
"select * from pt1"
"select * from pt1 order by a"
]
},
{
13 changes: 7 additions & 6 deletions planner/cascades/testdata/integration_suite_out.json
@@ -1197,17 +1197,18 @@
"Name": "TestCascadePlannerHashedPartTable",
"Cases": [
{
"SQL": "select * from pt1",
"SQL": "select * from pt1 order by a",
"Plan": [
"TableReader_5 10000.00 root partition:all data:TableFullScan_6",
"└─TableFullScan_6 10000.00 cop[tikv] table:pt1 keep order:false, stats:pseudo"
"Sort_11 10000.00 root test.pt1.a",
"└─TableReader_9 10000.00 root partition:all data:TableFullScan_10",
" └─TableFullScan_10 10000.00 cop[tikv] table:pt1 keep order:false, stats:pseudo"
],
"Result": [
"4 40",
"1 10",
"5 50",
"2 20",
"3 30"
"3 30",
"4 40",
"5 50"
]
}
]
1 change: 1 addition & 0 deletions planner/core/find_best_task.go
@@ -2234,6 +2234,7 @@ func (ds *DataSource) getOriginalPhysicalIndexScan(prop *property.PhysicalProperty,
physicalTableID: ds.physicalTableID,
tblColHists: ds.TblColHists,
pkIsHandleCol: ds.getPKIsHandleCol(),
constColsByCond: path.ConstCols,
prop: prop,
}.Init(ds.ctx, ds.blockOffset)
statsTbl := ds.statisticTable

