diff --git a/docs/api-references/docs.md b/docs/api-references/docs.md
index efb3767c54..adf1bb2614 100644
--- a/docs/api-references/docs.md
+++ b/docs/api-references/docs.md
@@ -16587,6 +16587,21 @@ TiDBInitializer
Initializer is the init configurations of TiDB
+
+
+bootstrapSQLConfigMapName
+
+string
+
+ |
+
+(Optional)
+ BootstrapSQLConfigMapName is the name of the ConfigMap which contains the bootstrap SQL file with the key bootstrap-sql ,
+which will only be executed when a TiDB cluster bootstraps for the first time.
+The field should be set ONLY when creating a TC, since it only takes effect on the first-time bootstrap.
+Only v6.6.0+ supports this feature.
+ |
+
TiDBStatus
diff --git a/manifests/crd.yaml b/manifests/crd.yaml
index 8430d5a0ac..89159058dd 100644
--- a/manifests/crd.yaml
+++ b/manifests/crd.yaml
@@ -24698,6 +24698,8 @@ spec:
type: string
binlogEnabled:
type: boolean
+ bootstrapSQLConfigMapName:
+ type: string
config:
x-kubernetes-preserve-unknown-fields: true
configUpdateStrategy:
diff --git a/manifests/crd/v1/pingcap.com_tidbclusters.yaml b/manifests/crd/v1/pingcap.com_tidbclusters.yaml
index d62bf7f4f5..fc9077f229 100644
--- a/manifests/crd/v1/pingcap.com_tidbclusters.yaml
+++ b/manifests/crd/v1/pingcap.com_tidbclusters.yaml
@@ -11443,6 +11443,8 @@ spec:
type: string
binlogEnabled:
type: boolean
+ bootstrapSQLConfigMapName:
+ type: string
config:
x-kubernetes-preserve-unknown-fields: true
configUpdateStrategy:
diff --git a/manifests/crd/v1beta1/pingcap.com_tidbclusters.yaml b/manifests/crd/v1beta1/pingcap.com_tidbclusters.yaml
index 15ea640100..6ed14746df 100644
--- a/manifests/crd/v1beta1/pingcap.com_tidbclusters.yaml
+++ b/manifests/crd/v1beta1/pingcap.com_tidbclusters.yaml
@@ -11428,6 +11428,8 @@ spec:
type: string
binlogEnabled:
type: boolean
+ bootstrapSQLConfigMapName:
+ type: string
config:
x-kubernetes-preserve-unknown-fields: true
configUpdateStrategy:
diff --git a/manifests/crd_v1beta1.yaml b/manifests/crd_v1beta1.yaml
index fba7cf8e9a..060a2f9cff 100644
--- a/manifests/crd_v1beta1.yaml
+++ b/manifests/crd_v1beta1.yaml
@@ -24678,6 +24678,8 @@ spec:
type: string
binlogEnabled:
type: boolean
+ bootstrapSQLConfigMapName:
+ type: string
config:
x-kubernetes-preserve-unknown-fields: true
configUpdateStrategy:
diff --git a/pkg/apis/pingcap/v1alpha1/openapi_generated.go b/pkg/apis/pingcap/v1alpha1/openapi_generated.go
index df40e9fb22..855cffbb94 100644
--- a/pkg/apis/pingcap/v1alpha1/openapi_generated.go
+++ b/pkg/apis/pingcap/v1alpha1/openapi_generated.go
@@ -8675,6 +8675,13 @@ func schema_pkg_apis_pingcap_v1alpha1_TiDBSpec(ref common.ReferenceCallback) com
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBInitializer"),
},
},
+ "bootstrapSQLConfigMapName": {
+ SchemaProps: spec.SchemaProps{
+						Description: "BootstrapSQLConfigMapName is the name of the ConfigMap which contains the bootstrap SQL file with the key `bootstrap-sql`, which will only be executed when a TiDB cluster bootstraps for the first time. The field should be set ONLY when creating a TC, since it only takes effect on the first-time bootstrap. Only v6.6.0+ supports this feature.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
},
Required: []string{"replicas"},
},
diff --git a/pkg/apis/pingcap/v1alpha1/tidbcluster.go b/pkg/apis/pingcap/v1alpha1/tidbcluster.go
index 46ad5042e2..563c0cd6eb 100644
--- a/pkg/apis/pingcap/v1alpha1/tidbcluster.go
+++ b/pkg/apis/pingcap/v1alpha1/tidbcluster.go
@@ -975,6 +975,14 @@ func (tc *TidbCluster) IsTiDBBinlogEnabled() bool {
return *binlogEnabled
}
+func (tidb *TiDBSpec) IsBootstrapSQLEnabled() bool {
+ if tidb.BootstrapSQLConfigMapName != nil && *tidb.BootstrapSQLConfigMapName != "" {
+ return true
+ }
+
+ return false
+}
+
func (tidb *TiDBSpec) IsTLSClientEnabled() bool {
return tidb.TLSClient != nil && tidb.TLSClient.Enabled
}
diff --git a/pkg/apis/pingcap/v1alpha1/types.go b/pkg/apis/pingcap/v1alpha1/types.go
index cc8c21bf22..57ec800532 100644
--- a/pkg/apis/pingcap/v1alpha1/types.go
+++ b/pkg/apis/pingcap/v1alpha1/types.go
@@ -914,6 +914,13 @@ type TiDBSpec struct {
//
// +optional
Initializer *TiDBInitializer `json:"initializer,omitempty"`
+
+	// BootstrapSQLConfigMapName is the name of the ConfigMap which contains the bootstrap SQL file with the key `bootstrap-sql`,
+	// which will only be executed when a TiDB cluster bootstraps for the first time.
+	// The field should be set ONLY when creating a TC, since it only takes effect on the first-time bootstrap.
+	// Only v6.6.0+ supports this feature.
+ // +optional
+ BootstrapSQLConfigMapName *string `json:"bootstrapSQLConfigMapName,omitempty"`
}
type TiDBInitializer struct {
diff --git a/pkg/apis/pingcap/v1alpha1/validation/validation.go b/pkg/apis/pingcap/v1alpha1/validation/validation.go
index 666b4370f5..59d55432af 100644
--- a/pkg/apis/pingcap/v1alpha1/validation/validation.go
+++ b/pkg/apis/pingcap/v1alpha1/validation/validation.go
@@ -536,6 +536,7 @@ func ValidateUpdateTidbCluster(old, tc *v1alpha1.TidbCluster) field.ErrorList {
"The instance must not be mutate or set value other than the cluster name"))
}
allErrs = append(allErrs, validateUpdatePDConfig(old.Spec.PD, tc.Spec.PD, field.NewPath("spec.pd.config"))...)
+ allErrs = append(allErrs, disallowMutateBootstrapSQLConfigMapName(old.Spec.TiDB, tc.Spec.TiDB, field.NewPath("spec.tidb.bootstrapSQLConfigMapName"))...)
allErrs = append(allErrs, disallowUsingLegacyAPIInNewCluster(old, tc)...)
return allErrs
@@ -643,6 +644,23 @@ func validateUpdatePDConfig(oldPdSpec, pdSpec *v1alpha1.PDSpec, path *field.Path
return allErrs
}
+// disallowMutateBootstrapSQLConfigMapName checks whether the user has mutated the bootstrapSQLConfigMapName field.
+// Updating bootstrapSQLConfigMapName is only allowed from non-nil to nil.
+func disallowMutateBootstrapSQLConfigMapName(old, new *v1alpha1.TiDBSpec, p *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if old == nil || new == nil {
+ return allErrs
+ }
+
+ bootstrapSQLSpecified := old.BootstrapSQLConfigMapName != nil && new.BootstrapSQLConfigMapName != nil
+ if (bootstrapSQLSpecified && *old.BootstrapSQLConfigMapName != *new.BootstrapSQLConfigMapName) ||
+ (!bootstrapSQLSpecified && new.BootstrapSQLConfigMapName != nil) {
+ return append(allErrs, field.Invalid(p, new.BootstrapSQLConfigMapName, "bootstrapSQLConfigMapName is immutable"))
+ }
+
+ return allErrs
+}
+
func validateDeleteSlots(annotations map[string]string, key string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if annotations != nil {
diff --git a/pkg/apis/pingcap/v1alpha1/validation/validation_test.go b/pkg/apis/pingcap/v1alpha1/validation/validation_test.go
index a40b6d5df1..318a0ada5c 100644
--- a/pkg/apis/pingcap/v1alpha1/validation/validation_test.go
+++ b/pkg/apis/pingcap/v1alpha1/validation/validation_test.go
@@ -749,3 +749,74 @@ func TestValidatePDSpec(t *testing.T) {
})
}
}
+
+func Test_disallowMutateBootstrapSQLConfigMapName(t *testing.T) {
+ g := NewGomegaWithT(t)
+ tests := []struct {
+ name string
+ old *v1alpha1.TiDBSpec
+ new *v1alpha1.TiDBSpec
+ wantError bool
+ }{
+ {
+ name: "no change, both nil",
+ old: &v1alpha1.TiDBSpec{
+ BootstrapSQLConfigMapName: nil,
+ },
+ new: &v1alpha1.TiDBSpec{
+ BootstrapSQLConfigMapName: nil,
+ },
+ wantError: false,
+ },
+ {
+ name: "no change, both non-nil",
+ old: &v1alpha1.TiDBSpec{
+ BootstrapSQLConfigMapName: pointer.StringPtr("old"),
+ },
+ new: &v1alpha1.TiDBSpec{
+ BootstrapSQLConfigMapName: pointer.StringPtr("old"),
+ },
+ wantError: false,
+ },
+ {
+ name: "mutate from non-nil to nil",
+ old: &v1alpha1.TiDBSpec{
+ BootstrapSQLConfigMapName: pointer.StringPtr("old"),
+ },
+ new: &v1alpha1.TiDBSpec{
+ BootstrapSQLConfigMapName: nil,
+ },
+ wantError: false,
+ },
+ {
+ name: "mutate from nil to non-nil",
+ old: &v1alpha1.TiDBSpec{
+ BootstrapSQLConfigMapName: nil,
+ },
+ new: &v1alpha1.TiDBSpec{
+ BootstrapSQLConfigMapName: pointer.StringPtr("new"),
+ },
+ wantError: true,
+ },
+ {
+ name: "mutate from non-nil to non-nil",
+ old: &v1alpha1.TiDBSpec{
+ BootstrapSQLConfigMapName: pointer.StringPtr("old"),
+ },
+ new: &v1alpha1.TiDBSpec{
+ BootstrapSQLConfigMapName: pointer.StringPtr("new"),
+ },
+ wantError: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ errs := disallowMutateBootstrapSQLConfigMapName(tt.old, tt.new, field.NewPath("spec.tidb.bootstrapSQLConfigMapName"))
+ if tt.wantError {
+ g.Expect(len(errs)).NotTo(Equal(0))
+ } else {
+ g.Expect(len(errs)).To(Equal(0))
+ }
+ })
+ }
+}
diff --git a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
index 376809e6dc..2415f14d6c 100644
--- a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
@@ -5954,6 +5954,11 @@ func (in *TiDBSpec) DeepCopyInto(out *TiDBSpec) {
*out = new(TiDBInitializer)
**out = **in
}
+ if in.BootstrapSQLConfigMapName != nil {
+ in, out := &in.BootstrapSQLConfigMapName, &out.BootstrapSQLConfigMapName
+ *out = new(string)
+ **out = **in
+ }
return
}
diff --git a/pkg/manager/member/startscript/v1/render_script.go b/pkg/manager/member/startscript/v1/render_script.go
index c91cc2eae3..1d3ae15e30 100644
--- a/pkg/manager/member/startscript/v1/render_script.go
+++ b/pkg/manager/member/startscript/v1/render_script.go
@@ -83,7 +83,6 @@ func RenderTiDBStartScript(tc *v1alpha1.TidbCluster) (string, error) {
PluginDirectory: "/plugins",
PluginList: strings.Join(plugins, ","),
}
-
model.Path = "${CLUSTER_NAME}-pd:2379"
if tc.AcrossK8s() {
model.Path = "${CLUSTER_NAME}-pd:2379" // get pd addr from discovery in startup script
diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go
index 453834d837..8cf4cb0153 100644
--- a/pkg/manager/member/tidb_member_manager.go
+++ b/pkg/manager/member/tidb_member_manager.go
@@ -73,6 +73,9 @@ const (
// tidb DC label Name
tidbDCLabel = "zone"
+
+ bootstrapSQLFilePath = "/etc/tidb-bootstrap"
+ bootstrapSQLFileName = "bootstrap.sql"
)
var (
@@ -579,6 +582,9 @@ func getTiDBConfigMap(tc *v1alpha1.TidbCluster) (*corev1.ConfigMap, error) {
config.Set("security.ssl-cert", path.Join(serverCertPath, corev1.TLSCertKey))
config.Set("security.ssl-key", path.Join(serverCertPath, corev1.TLSPrivateKeyKey))
}
+ if tc.Spec.TiDB.IsBootstrapSQLEnabled() {
+ config.Set("initialize-sql-file", path.Join(bootstrapSQLFilePath, bootstrapSQLFileName))
+ }
confText, err := config.MarshalTOML()
if err != nil {
return nil, err
@@ -767,6 +773,22 @@ func getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap)
},
})
}
+ if tc.Spec.TiDB != nil && tc.Spec.TiDB.IsBootstrapSQLEnabled() {
+ volMounts = append(volMounts, corev1.VolumeMount{
+ Name: "tidb-bootstrap-sql", ReadOnly: true, MountPath: bootstrapSQLFilePath,
+ })
+
+ vols = append(vols, corev1.Volume{
+ Name: "tidb-bootstrap-sql", VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: *tc.Spec.TiDB.BootstrapSQLConfigMapName,
+ },
+ Items: []corev1.KeyToPath{{Key: "bootstrap-sql", Path: bootstrapSQLFileName}},
+ },
+ },
+ })
+ }
if tc.IsTLSClusterEnabled() {
vols = append(vols, corev1.Volume{
Name: "tidb-tls", VolumeSource: corev1.VolumeSource{
diff --git a/tests/e2e/tidbcluster/across-kubernetes.go b/tests/e2e/tidbcluster/across-kubernetes.go
index ca670331ab..fb9a1e0ed1 100644
--- a/tests/e2e/tidbcluster/across-kubernetes.go
+++ b/tests/e2e/tidbcluster/across-kubernetes.go
@@ -155,7 +155,7 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() {
MustCreateXK8sTCWithComponentsReady(genericCli, oa, []*v1alpha1.TidbCluster{tc1, tc2, tc3}, false)
ginkgo.By("Check deploy status of all clusters")
- err := CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2, tc3}, 5*time.Second, 3*time.Minute)
+ err := CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2, tc3}, 5*time.Second, 10*time.Minute)
framework.ExpectNoError(err, "failed to check status")
})
@@ -203,7 +203,7 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() {
framework.ExpectNoError(genericCli.Delete(context.TODO(), tc2), "failed to delete cluster 2")
ginkgo.By("Check status of tc1")
- err = oa.WaitForTidbClusterReady(tc1, 2*time.Minute, 30*time.Second)
+ err = oa.WaitForTidbClusterReady(tc1, 25*time.Minute, 30*time.Second)
framework.ExpectNoError(err, "timeout to wait for tc1 to be healthy")
// connectable test
@@ -222,7 +222,7 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() {
MustCreateXK8sTCWithComponentsReady(genericCli, oa, []*v1alpha1.TidbCluster{tc1, tc2, tc3}, false)
ginkgo.By("Check status over all clusters")
- err := CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2, tc3}, 5*time.Second, 3*time.Minute)
+ err := CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2, tc3}, 5*time.Second, 10*time.Minute)
framework.ExpectNoError(err, "failed to check status")
})
@@ -243,7 +243,7 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() {
return nil
})
framework.ExpectNoError(err, "failed to update cluster domain of cluster-1 %s/%s", tc1.Namespace, tc1.Name)
- err = oa.WaitForTidbClusterReady(tc1, 30*time.Minute, 30*time.Second)
+ err = oa.WaitForTidbClusterReady(tc1, 25*time.Minute, 30*time.Second)
framework.ExpectNoError(err, "failed to wait for cluster-1 ready: %s/%s", tc1.Namespace, tc1.Name)
localHost, localPort, cancel, err := portforward.ForwardOnePort(fw, tc1.Namespace, fmt.Sprintf("svc/%s-pd", tc1.Name), 2379)
@@ -276,7 +276,7 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() {
MustCreateXK8sTCWithComponentsReady(genericCli, oa, []*v1alpha1.TidbCluster{tc2, tc3}, false)
ginkgo.By("Deploy status of all clusters")
- err = CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2, tc3}, 5*time.Second, 3*time.Minute)
+ err = CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2, tc3}, 5*time.Second, 10*time.Minute)
framework.ExpectNoError(err, "failed to check status")
})
@@ -317,7 +317,7 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() {
framework.ExpectEqual(foundSecretName, true)
ginkgo.By("Check deploy status over all clusters")
- err = CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2}, 5*time.Second, 3*time.Minute)
+ err = CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2}, 5*time.Second, 10*time.Minute)
framework.ExpectNoError(err, "failed to check status")
ginkgo.By("Connecting to tidb server to verify the connection is TLS enabled")
@@ -340,7 +340,7 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() {
MustCreateXK8sTCWithComponentsReady(genericCli, oa, []*v1alpha1.TidbCluster{tc1, tc2, tc3}, false)
ginkgo.By("Check deploy status of all clusters")
- err := CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2, tc3}, 5*time.Second, 3*time.Minute)
+ err := CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2, tc3}, 5*time.Second, 10*time.Minute)
framework.ExpectNoError(err, "failed to check status")
})
@@ -417,7 +417,7 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() {
MustCreateXK8sTCWithComponentsReady(genericCli, oa, []*v1alpha1.TidbCluster{tc1, tc2}, false)
ginkgo.By("Check deploy status of all clusters")
- err := CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2}, 5*time.Second, 3*time.Minute)
+ err := CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2}, 5*time.Second, 10*time.Minute)
framework.ExpectNoError(err, "failed to check status")
})
@@ -439,7 +439,7 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() {
MustCreateXK8sTCWithComponentsReady(genericCli, oa, []*v1alpha1.TidbCluster{tc1, tc2}, true)
ginkgo.By("Check deploy status of all clusters")
- err := CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2}, 5*time.Second, 3*time.Minute)
+ err := CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2}, 5*time.Second, 10*time.Minute)
framework.ExpectNoError(err, "failed to check status")
})
}
@@ -581,11 +581,11 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() {
framework.ExpectNoError(err, "waiting namespsace %q to be deleted", ns2)
ginkgo.By("Check status of other clusters")
- err = oa.WaitForTidbClusterReady(tc1, 10*time.Minute, 30*time.Second)
+ err = oa.WaitForTidbClusterReady(tc1, 25*time.Minute, 30*time.Second)
framework.ExpectNoError(err, "%q cluster not healthy after cluster %q fail", tcName1, tcName2)
- err = oa.WaitForTidbClusterReady(tc3, 10*time.Minute, 30*time.Second)
+ err = oa.WaitForTidbClusterReady(tc3, 25*time.Minute, 30*time.Second)
framework.ExpectNoError(err, "%q cluster not healthy after cluster %q fail", tcName3, tcName2)
- err = CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc3}, 5*time.Second, 3*time.Minute)
+ err = CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc3}, 5*time.Second, 10*time.Minute)
framework.ExpectNoError(err, "failed to check status after cluster %q fail", tcName2)
ginkgo.By("Check functionality of other clusters by querying tidb")
@@ -634,7 +634,7 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() {
tc2.Spec.TLSCluster = &v1alpha1.TLSCluster{Enabled: true}
err = genericCli.Create(context.TODO(), tc2)
framework.ExpectNoError(err, "create TidbCluster %q", tc2.Name)
- err = oa.WaitForTidbClusterReady(tc2, 5*time.Minute, 10*time.Second)
+ err = oa.WaitForTidbClusterReady(tc2, 25*time.Minute, 10*time.Second)
framework.ExpectError(err, "%q should not be able to join %q as pd fails", tcName2, tcName1)
ginkgo.By("Recover PD in cluster-1")
@@ -648,9 +648,9 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() {
framework.ExpectNoError(err, "deleting sts of pd for %q", tcName1)
ginkgo.By("Join cluster-2 into cluster-1 when pd running normally")
- err = oa.WaitForTidbClusterReady(tc2, 10*time.Minute, 30*time.Second)
+ err = oa.WaitForTidbClusterReady(tc2, 25*time.Minute, 30*time.Second)
framework.ExpectNoError(err, "waiting for %q ready", tcName2)
- err = CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2}, 5*time.Second, 3*time.Minute)
+ err = CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2}, 5*time.Second, 10*time.Minute)
framework.ExpectNoError(err, "%q failed to join into %q", tcName2, tcName1)
})
})
@@ -699,7 +699,7 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() {
MustCreateXK8sTCWithComponentsReady(genericCli, oa, []*v1alpha1.TidbCluster{tc1, tc2}, false)
ginkgo.By("Check deploy status of all clusters")
- err := CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2}, 5*time.Second, 3*time.Minute)
+ err := CheckStatusWhenAcrossK8sWithTimeout(cli, []*v1alpha1.TidbCluster{tc1, tc2}, 5*time.Second, 10*time.Minute)
framework.ExpectNoError(err, "failed to check status")
})
@@ -730,7 +730,7 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() {
utiltc.MustWaitForComponentPhase(cli, tc, v1alpha1.PDMemberType, v1alpha1.UpgradePhase, 3*time.Minute, time.Second*3)
ginkgo.By("Wait for cluster is ready")
- err = oa.WaitForTidbClusterReady(tc, 15*time.Minute, 10*time.Second)
+ err = oa.WaitForTidbClusterReady(tc, 25*time.Minute, 10*time.Second)
framework.ExpectNoError(err, "failed to wait for TidbCluster %s/%s components ready", ns, tc.Name)
ginkgo.By("Check status of components not changed")
@@ -841,7 +841,7 @@ func MustCreateXK8sTCWithComponentsReady(cli ctrlCli.Client, oa *tests.OperatorA
// for faster cluster creation, checking status after creating all clusters.
for _, tc := range tidbclusters {
- err := oa.WaitForTidbClusterReady(tc, 15*time.Minute, 10*time.Second)
+ err := oa.WaitForTidbClusterReady(tc, 25*time.Minute, 10*time.Second)
framework.ExpectNoError(err, "failed to wait for TidbCluster %s/%s components ready", tc.Namespace, tc.Name)
}
}