Commit 36d3185
update scaling phase
DanielZhangQD committed Jun 18, 2020
1 parent e3687e7 commit 36d3185
Showing 7 changed files with 21 additions and 166 deletions.
3 changes: 1 addition & 2 deletions pkg/apis/pingcap/v1alpha1/tidbcluster.go
@@ -199,8 +199,7 @@ func (tc *TidbCluster) TiKVUpgrading() bool {
 }
 
 func (tc *TidbCluster) TiKVScaling() bool {
-	return tc.Status.TiKV.Phase == ScaleOutPhase ||
-		tc.Status.TiKV.Phase == ScaleInPhase
+	return tc.Status.TiKV.Phase == ScalePhase
 }
 
 func (tc *TidbCluster) TiDBUpgrading() bool {
6 changes: 2 additions & 4 deletions pkg/apis/pingcap/v1alpha1/types.go
@@ -61,10 +61,8 @@ const (
 	NormalPhase MemberPhase = "Normal"
 	// UpgradePhase represents the upgrade state of TiDB cluster.
 	UpgradePhase MemberPhase = "Upgrade"
-	// ScaleInPhase represents the scaling in state of TiDB cluster.
-	ScaleInPhase MemberPhase = "ScaleIn"
-	// ScaleOutPhase represents the scaling out state of TiDB cluster.
-	ScaleOutPhase MemberPhase = "ScaleOut"
+	// ScalePhase represents the scaling state of TiDB cluster.
+	ScalePhase MemberPhase = "Scale"
 )
 
 // ConfigUpdateStrategy represents the strategy to update configuration
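With the two direction-specific constants collapsed into one, code that previously matched either ScaleInPhase or ScaleOutPhase reduces to a single comparison, as the TiKVScaling() change above shows. A self-contained sketch of the resulting check (the MemberPhase type and constants are copied from the diff; the standalone scaling helper and main are illustrative only):

    package main

    import "fmt"

    // MemberPhase mirrors the string-typed phase in types.go.
    type MemberPhase string

    const (
    	NormalPhase  MemberPhase = "Normal"
    	UpgradePhase MemberPhase = "Upgrade"
    	// ScalePhase replaces the former ScaleInPhase/ScaleOutPhase pair.
    	ScalePhase MemberPhase = "Scale"
    )

    // scaling is the post-commit check: one comparison instead of two.
    func scaling(phase MemberPhase) bool {
    	return phase == ScalePhase
    }

    func main() {
    	for _, p := range []MemberPhase{NormalPhase, ScalePhase, UpgradePhase} {
    		fmt.Printf("phase=%-8s scaling=%v\n", p, scaling(p))
    	}
    }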
4 changes: 3 additions & 1 deletion pkg/manager/member/tikv_member_manager.go
@@ -633,7 +633,9 @@ func (tkmm *tikvMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, s
 	}
 	if upgrading && tc.Status.PD.Phase != v1alpha1.UpgradePhase {
 		tc.Status.TiKV.Phase = v1alpha1.UpgradePhase
-	} else if !tc.TiKVScaling() {
+	} else if tc.TiKVStsDesiredReplicas() != *set.Spec.Replicas {
+		tc.Status.TiKV.Phase = v1alpha1.ScalePhase
+	} else {
 		tc.Status.TiKV.Phase = v1alpha1.NormalPhase
 	}
 
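This hunk carries the core design change: instead of trusting a phase that the scaler wrote earlier, syncTidbClusterStatus now derives the phase on every sync by comparing the desired replica count against what the StatefulSet spec currently declares. A minimal sketch of that decision order, with the operator's types reduced to plain values (phaseFor and its parameters are illustrative names, not the operator's API):

    package main

    import "fmt"

    // phaseFor sketches the decision order in the new syncTidbClusterStatus:
    // an in-flight TiKV upgrade wins (unless PD itself is upgrading), a
    // replica mismatch marks Scale, and everything else is Normal.
    func phaseFor(tikvUpgrading, pdUpgrading bool, desired, stsReplicas int32) string {
    	switch {
    	case tikvUpgrading && !pdUpgrading:
    		return "Upgrade"
    	case desired != stsReplicas:
    		// Desired replicas differ from what the StatefulSet spec
    		// declares, so a scale operation is still in progress.
    		return "Scale"
    	default:
    		return "Normal"
    	}
    }

    func main() {
    	fmt.Println(phaseFor(false, false, 4, 3)) // Scale (scaling out)
    	fmt.Println(phaseFor(false, false, 2, 3)) // Scale (scaling in)
    	fmt.Println(phaseFor(false, false, 3, 3)) // Normal
    }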
12 changes: 8 additions & 4 deletions pkg/manager/member/tikv_member_manager_test.go
@@ -799,11 +799,15 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) {
 	status := apps.StatefulSetStatus{
 		Replicas: int32(3),
 	}
+	spec := apps.StatefulSetSpec{
+		Replicas: pointer.Int32Ptr(3),
+	}
 	now := metav1.Time{Time: time.Now()}
 	testFn := func(test *testcase, t *testing.T) {
 		tc := newTidbClusterForPD()
 		tc.Status.PD.Phase = v1alpha1.NormalPhase
 		set := &apps.StatefulSet{
+			Spec:   spec,
 			Status: status,
 		}
 		if test.updateTC != nil {
@@ -913,7 +917,7 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) {
 		{
 			name: "statefulset is scaling out",
 			updateTC: func(tc *v1alpha1.TidbCluster) {
-				tc.Status.TiKV.Phase = v1alpha1.ScaleOutPhase
+				tc.Spec.TiKV.Replicas = 4
 			},
 			upgradingFn: func(lister corelisters.PodLister, controlInterface pdapi.PDControlInterface, set *apps.StatefulSet, cluster *v1alpha1.TidbCluster) (bool, error) {
 				return false, nil
@@ -925,13 +929,13 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) {
 			errExpectFn: nil,
 			tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) {
 				g.Expect(tc.Status.TiKV.StatefulSet.Replicas).To(Equal(int32(3)))
-				g.Expect(tc.Status.TiKV.Phase).To(Equal(v1alpha1.ScaleOutPhase))
+				g.Expect(tc.Status.TiKV.Phase).To(Equal(v1alpha1.ScalePhase))
 			},
 		},
 		{
 			name: "statefulset is scaling in",
 			updateTC: func(tc *v1alpha1.TidbCluster) {
-				tc.Status.TiKV.Phase = v1alpha1.ScaleInPhase
+				tc.Spec.TiKV.Replicas = 2
 			},
 			upgradingFn: func(lister corelisters.PodLister, controlInterface pdapi.PDControlInterface, set *apps.StatefulSet, cluster *v1alpha1.TidbCluster) (bool, error) {
 				return false, nil
@@ -943,7 +947,7 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) {
 			errExpectFn: nil,
 			tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) {
 				g.Expect(tc.Status.TiKV.StatefulSet.Replicas).To(Equal(int32(3)))
-				g.Expect(tc.Status.TiKV.Phase).To(Equal(v1alpha1.ScaleInPhase))
+				g.Expect(tc.Status.TiKV.Phase).To(Equal(v1alpha1.ScalePhase))
 			},
 		},
 		{
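Note how the test cases now drive the phase indirectly: updateTC mutates tc.Spec.TiKV.Replicas and the assertion checks the phase that status sync derives, rather than pre-seeding tc.Status.TiKV.Phase. Because StatefulSetSpec.Replicas is a *int32, the fixture seeds it via pointer.Int32Ptr. A small sketch of that comparison (assuming the k8s.io/api and k8s.io/utils modules are available):

    package main

    import (
    	"fmt"

    	apps "k8s.io/api/apps/v1"
    	"k8s.io/utils/pointer"
    )

    func main() {
    	// StatefulSetSpec.Replicas is a *int32, hence pointer.Int32Ptr in
    	// the test fixture; the phase check then dereferences it.
    	set := apps.StatefulSet{
    		Spec: apps.StatefulSetSpec{Replicas: pointer.Int32Ptr(3)},
    	}
    	desired := int32(4) // stands in for tc.Spec.TiKV.Replicas after updateTC
    	fmt.Println("scaling:", desired != *set.Spec.Replicas) // true
    }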
34 changes: 3 additions & 31 deletions pkg/manager/member/tikv_scaler.go
@@ -49,10 +49,6 @@ func (tsd *tikvScaler) Scale(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet,
 		return tsd.ScaleOut(tc, oldSet, newSet)
 	} else if scaling < 0 {
 		return tsd.ScaleIn(tc, oldSet, newSet)
-	} else {
-		if tc.TiKVScaling() {
-			tc.Status.TiKV.Phase = v1alpha1.NormalPhase
-		}
 	}
 	// we only sync auto scaler annotations when we are finishing syncing scaling
 	return tsd.SyncAutoScalerAnn(tc, oldSet)
@@ -62,17 +58,7 @@ func (tsd *tikvScaler) ScaleOut(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulS
 	_, ordinal, replicas, deleteSlots := scaleOne(oldSet, newSet)
 	resetReplicas(newSet, oldSet)
 	if tc.TiKVUpgrading() {
-		klog.Infof("the TidbCluster: [%s/%s]'s tikv is upgrading, can not scale out until the upgrade completed",
-			tc.Namespace, tc.Name)
-		return nil
-	}
-	// During TidbCluster upgrade, if TiKV is scaled at the same time, since
-	// TiKV cannot be upgraded during PD upgrade, the TiKV scaling will occur
-	// before the TiKV upgrade, in this case, the Pump, TiDB, TiFlash, TiCDC, etc.
-	// will be upgraded before TiKV upgrade.
-	// To avoid this case, we skip the scaling out during PD upgrade.
-	if tc.PDUpgrading() {
-		klog.Infof("the TidbCluster: [%s/%s]'s pd is upgrading, can not scale out until the upgrade completed",
+		klog.Infof("TidbCluster: [%s/%s]'s tikv is upgrading, can not scale out until the upgrade completed",
 			tc.Namespace, tc.Name)
 		return nil
 	}
@@ -83,7 +69,6 @@ func (tsd *tikvScaler) ScaleOut(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulS
 		return err
 	}
 
-	tc.Status.TiKV.Phase = v1alpha1.ScaleOutPhase
 	setReplicasAndDeleteSlots(newSet, replicas, deleteSlots)
 	return nil
 }
@@ -98,17 +83,7 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe
 
 	// tikv can not scale in when it is upgrading
 	if tc.TiKVUpgrading() {
-		klog.Infof("the TidbCluster: [%s/%s]'s tikv is upgrading,can not scale in until upgrade have completed",
-			ns, tcName)
-		return nil
-	}
-	// During TidbCluster upgrade, if TiKV is scaled at the same time, since
-	// TiKV cannot be upgraded during PD upgrade, the TiKV scaling will occur
-	// before the TiKV upgrade, in this case, the Pump, TiDB, TiFlash, TiCDC, etc.
-	// will be upgraded before TiKV upgrade.
-	// To avoid this case, we skip the scaling in during PD upgrade.
-	if tc.PDUpgrading() {
-		klog.Infof("the TidbCluster: [%s/%s]'s pd is upgrading, can not scale in until the upgrade completed",
+		klog.Infof("TidbCluster: [%s/%s]'s tikv is upgrading, can not scale in until upgrade completed",
 			ns, tcName)
 		return nil
 	}
@@ -122,7 +97,6 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe
 	}
 
 	if controller.PodWebhookEnabled {
-		tc.Status.TiKV.Phase = v1alpha1.ScaleInPhase
 		setReplicasAndDeleteSlots(newSet, replicas, deleteSlots)
 		return nil
 	}
@@ -135,7 +109,6 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe
 			return err
 		}
 		if state != v1alpha1.TiKVStateOffline {
-			tc.Status.TiKV.Phase = v1alpha1.ScaleInPhase
 			if err := controller.GetPDClient(tsd.pdControl, tc).DeleteStore(id); err != nil {
 				klog.Errorf("tikv scale in: failed to delete store %d, %v", id, err)
 				return err
@@ -173,7 +146,7 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe
 		}
 		klog.Infof("tikv scale in: set pvc %s/%s annotation: %s to %s",
 			ns, pvcName, label.AnnPVCDeferDeleting, now)
-		tc.Status.TiKV.Phase = v1alpha1.ScaleInPhase
+
 		setReplicasAndDeleteSlots(newSet, replicas, deleteSlots)
 		return nil
 	}
@@ -216,7 +189,6 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe
 		}
 		klog.Infof("pod %s not ready, tikv scale in: set pvc %s/%s annotation: %s to %s",
 			podName, ns, pvcName, label.AnnPVCDeferDeleting, now)
-		tc.Status.TiKV.Phase = v1alpha1.ScaleInPhase
 		setReplicasAndDeleteSlots(newSet, replicas, deleteSlots)
 		return nil
 	}
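With every tc.Status.TiKV.Phase assignment deleted, the scaler is left with only the mechanics of scaling (the webhook short-circuit, store deletion, PVC defer-deleting annotations), and phase reporting belongs entirely to status sync. A rough sketch of the slimmed-down dispatch shape (names and signatures are illustrative, not the operator's):

    package main

    import "fmt"

    // scale mirrors the slimmed-down tikvScaler.Scale: it only dispatches to
    // the scale-out/scale-in paths. The deleted else-branch that reset the
    // phase to Normal is gone, because syncTidbClusterStatus now owns that.
    func scale(desired, current int32) string {
    	if d := desired - current; d > 0 {
    		return "ScaleOut"
    	} else if d < 0 {
    		return "ScaleIn"
    	}
    	// No replica change: only the auto-scaler annotations get synced.
    	return "SyncAutoScalerAnn"
    }

    func main() {
    	fmt.Println(scale(4, 3)) // ScaleOut
    	fmt.Println(scale(2, 3)) // ScaleIn
    	fmt.Println(scale(3, 3)) // SyncAutoScalerAnn
    }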