Fix typo (complied -> compiled) #61835

Merged (1 commit) on Feb 21, 2024
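Every change in this PR fixes the same misspelling, "complied" for "compiled", in `unittest.skipIf` reason strings. The reason string is what the test runner prints when it skips a case, so the typo was visible in CI logs. Below is a minimal sketch of the pattern being corrected; the import path follows the `paddle.base.core` usage visible in the diffs, and the class and method names are illustrative, not taken from the PR:

import unittest

from paddle.base import core


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not compiled with CUDA",
)
class ExampleCudaOnlyTest(unittest.TestCase):
    def test_runs_only_on_cuda_builds(self):
        # Only reached when Paddle was built with CUDA support; on a
        # CPU-only build, unittest reports the reason string instead.
        self.assertTrue(core.is_compiled_with_cuda())

Run under `python -m unittest -v`, a skipped case is reported as `skipped 'core is not compiled with CUDA'`, which is exactly the text these commits correct.
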
test/amp/amp_base_models.py (2 changes: 1 addition & 1 deletion)

@@ -371,7 +371,7 @@ def build_while_model():
 
 @unittest.skipIf(
     not core.is_compiled_with_cuda(),
-    "core is not complied with CUDA and not support amp.",
+    "core is not compiled with CUDA and not support amp.",
 )
 class AmpTestBase(unittest.TestCase):
     def setUp(self):

test/amp/test_amp_master_grad.py (2 changes: 1 addition & 1 deletion)

@@ -33,7 +33,7 @@ def forward(self, x):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_float16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the float16",
+    "core is not compiled with CUDA and not support the float16",
 )
 @unittest.skipIf(
     not core.is_compiled_with_cuda()

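Most of the remaining files use a stricter variant of the same guard: the test is skipped unless the build has CUDA and the GPU at `CUDAPlace(0)` also supports the reduced-precision dtype under test. A hedged sketch of that form, with an invented class and test body:

import unittest

from paddle.base import core


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA and not support the bfloat16",
)
class ExampleBF16Test(unittest.TestCase):
    def test_bf16_available(self):
        # Reached only when device 0 of a CUDA build supports bfloat16.
        self.assertTrue(core.is_bfloat16_supported(core.CUDAPlace(0)))
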
test/distribution/test_dirichlet_op.py (2 changes: 1 addition & 1 deletion)

@@ -91,7 +91,7 @@ def _hypothesis_testing(self, outs):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestDirichletBF16Op(OpTest):
     # Because dirichlet random sample have not gradient, we skip gradient check.

test/ir/pir/fused_pass/test_fused_dropout_add_pass.py (2 changes: 1 addition & 1 deletion)

@@ -23,7 +23,7 @@
 
 @unittest.skipIf(
     not core.is_compiled_with_cuda(),
-    "core is not complied with CUDA",
+    "core is not compiled with CUDA",
 )
 class TestFusedDropoutAdd(unittest.TestCase):
     def _test_fused_dropout_add(self):

test/ir/pir/fused_pass/test_fused_gemm_epilogue_pass.py (2 changes: 1 addition & 1 deletion)

@@ -40,7 +40,7 @@ def get_cuda_version():
 
 @unittest.skipIf(
     not core.is_compiled_with_cuda() or get_cuda_version() < 11060,
-    "core is not complied with CUDA or nvcc version is less than11.6",
+    "core is not compiled with CUDA or nvcc version is less than11.6",
 )
 class TestFusedGemm_epilogueAdd(unittest.TestCase):
    def test_fused_gemm_epilogue_add(self):

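The `get_cuda_version()` helper compared against `11060` is defined earlier in this file; only its `def` line is visible in the hunk context. Given the comparison and the message mentioning 11.6 (the missing space in "than11.6" is in the original source and is not touched by this PR), the helper presumably encodes `major.minor` as `major * 1000 + minor * 10`, so `11060` means CUDA 11.6. The body below is an assumed reconstruction for illustration, not the file's actual code:

import os
import re


def get_cuda_version():
    # Parse e.g. "release 11.6, V11.6.124" from `nvcc --version` and
    # encode major.minor as major * 1000 + minor * 10 (11.6 -> 11060).
    output = os.popen("nvcc --version").read()
    match = re.search(r"release (\d+)\.(\d+)", output)
    if match is None:
        return -1
    major, minor = int(match.group(1)), int(match.group(2))
    return major * 1000 + minor * 10
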
test/legacy_test/test_arange.py (2 changes: 1 addition & 1 deletion)

@@ -72,7 +72,7 @@ def test_check_output(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestBFloat16ArangeOp(OpTest):
     def setUp(self):

test/legacy_test/test_argsort_op.py (2 changes: 1 addition & 1 deletion)

@@ -581,7 +581,7 @@ def init_direction(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestArgsortBF16Op(OpTest):
     def setUp(self):

test/legacy_test/test_bernoulli_op.py (2 changes: 1 addition & 1 deletion)

@@ -117,7 +117,7 @@ def init_dtype(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestBernoulliBF16Op(TestBernoulliOp):
     def init_dtype(self):

test/legacy_test/test_binomial_op.py (4 changes: 2 additions & 2 deletions)

@@ -225,7 +225,7 @@ def test_fixed_random_number(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_float16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the float16",
+    "core is not compiled with CUDA and not support the float16",
 )
 class TestBinomialFP16Op(TestBinomialOp):
     def init_dtype(self):

@@ -247,7 +247,7 @@ def verify_output(self, outs):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestBinomialBF16Op(TestBinomialOp):
     def init_dtype(self):

test/legacy_test/test_channel_shuffle.py (2 changes: 1 addition & 1 deletion)

@@ -319,7 +319,7 @@ def init_dtype(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestChannelShuffleBF16OP(OpTest):
     def setUp(self):

test/legacy_test/test_conj_op.py (2 changes: 1 addition & 1 deletion)

@@ -152,7 +152,7 @@ def init_dtype_type(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestConjBF16(OpTest):
     def setUp(self):

test/legacy_test/test_diag_v2.py (2 changes: 1 addition & 1 deletion)

@@ -313,7 +313,7 @@ def init_dtype(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestDiagV2BF16OP(OpTest):
     def setUp(self):

test/legacy_test/test_diagonal_op.py (2 changes: 1 addition & 1 deletion)

@@ -188,7 +188,7 @@ def init_dtype(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestDiagonalBF16OP(OpTest):
     def setUp(self):

test/legacy_test/test_dropout_nd_op.py (2 changes: 1 addition & 1 deletion)

@@ -142,7 +142,7 @@ def test_check_grad_normal(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestDropoutNdBF16Op(OpTest):
     def setUp(self):

test/legacy_test/test_empty_op.py (2 changes: 1 addition & 1 deletion)

@@ -296,7 +296,7 @@ def init_config(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestEmptyBF16Op(OpTest):
     def setUp(self):

test/legacy_test/test_erf_op.py (2 changes: 1 addition & 1 deletion)

@@ -118,7 +118,7 @@ def test_check_grad(self):
     or not paddle.base.core.is_bfloat16_supported(
         paddle.base.core.CUDAPlace(0)
     ),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestErfBF16OP(OpTest):
     def setUp(self):

test/legacy_test/test_erfinv_op.py (2 changes: 1 addition & 1 deletion)

@@ -119,7 +119,7 @@ def init_dtype(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestErfinvBF16Op(OpTest):
     def setUp(self):

test/legacy_test/test_eye_op.py (2 changes: 1 addition & 1 deletion)

@@ -203,7 +203,7 @@ def init_dtype(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestEyeBF16OP(OpTest):
     def setUp(self):

test/legacy_test/test_fill_diagonal_tensor_op.py (2 changes: 1 addition & 1 deletion)

@@ -157,7 +157,7 @@ def init_kernel_type(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TensorFillDiagTensorBF16(OpTest):
     def setUp(self):

test/legacy_test/test_flatten_contiguous_range_op.py (30 changes: 15 additions & 15 deletions)

@@ -105,7 +105,7 @@ def init_test_dtype(self):
 
 @unittest.skipIf(
     not core.is_compiled_with_cuda(),
-    "core is not complied with CUDA",
+    "core is not compiled with CUDA",
 )
 class TestFlattenFP16Op(TestFlattenOp):
     def init_test_dtype(self):

@@ -115,7 +115,7 @@ def init_test_dtype(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op(TestFlattenOp):
     def if_enable_cinn(self):

@@ -146,7 +146,7 @@ def init_test_dtype(self):
 
 @unittest.skipIf(
     not core.is_compiled_with_cuda(),
-    "core is not complied with CUDA",
+    "core is not compiled with CUDA",
 )
 class TestFlattenFP16Op_1(TestFlattenOp_1):
     def init_test_dtype(self):

@@ -156,7 +156,7 @@ def init_test_dtype(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_1(TestFlattenOp_1):
     def if_enable_cinn(self):

@@ -187,7 +187,7 @@ def init_test_dtype(self):
 
 @unittest.skipIf(
     not core.is_compiled_with_cuda(),
-    "core is not complied with CUDA",
+    "core is not compiled with CUDA",
 )
 class TestFlattenFP16Op_2(TestFlattenOp_2):
     def init_test_dtype(self):

@@ -197,7 +197,7 @@ def init_test_dtype(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_2(TestFlattenOp_2):
     def if_enable_cinn(self):

@@ -228,7 +228,7 @@ def init_test_dtype(self):
 
 @unittest.skipIf(
     not core.is_compiled_with_cuda(),
-    "core is not complied with CUDA",
+    "core is not compiled with CUDA",
 )
 class TestFlattenFP16Op_3(TestFlattenOp_3):
     def init_test_dtype(self):

@@ -238,7 +238,7 @@ def init_test_dtype(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_3(TestFlattenOp_3):
     def if_enable_cinn(self):

@@ -269,7 +269,7 @@ def init_test_dtype(self):
 
 @unittest.skipIf(
     not core.is_compiled_with_cuda(),
-    "core is not complied with CUDA",
+    "core is not compiled with CUDA",
 )
 class TestFlattenFP16Op_4(TestFlattenOp_4):
     def init_test_dtype(self):

@@ -279,7 +279,7 @@ def init_test_dtype(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_4(TestFlattenOp_4):
     def if_enable_cinn(self):

@@ -310,7 +310,7 @@ def init_test_dtype(self):
 
 @unittest.skipIf(
     not core.is_compiled_with_cuda(),
-    "core is not complied with CUDA",
+    "core is not compiled with CUDA",
 )
 class TestFlattenFP16Op_5(TestFlattenOp_5):
     def init_test_dtype(self):

@@ -320,7 +320,7 @@ def init_test_dtype(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_5(TestFlattenOp_5):
     def if_enable_cinn(self):

@@ -354,7 +354,7 @@ def init_test_dtype(self):
 
 @unittest.skipIf(
     not core.is_compiled_with_cuda(),
-    "core is not complied with CUDA",
+    "core is not compiled with CUDA",
 )
 class TestFlattenFP16Op_ZeroDim(TestFlattenOp_ZeroDim):
     def init_test_dtype(self):

@@ -382,7 +382,7 @@ def init_test_dtype(self):
 
 @unittest.skipIf(
     not core.is_compiled_with_cuda(),
-    "core is not complied with CUDA",
+    "core is not compiled with CUDA",
 )
 class TestFlattenFP16OpSixDims(TestFlattenOpSixDims):
     def init_test_dtype(self):

@@ -392,7 +392,7 @@ def init_test_dtype(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16OpSixDims(TestFlattenOpSixDims):
     def if_enable_cinn(self):

test/legacy_test/test_frame_op.py (2 changes: 1 addition & 1 deletion)

@@ -151,7 +151,7 @@ def init_dtype(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestFrameBF16OP(OpTest):
     def setUp(self):

test/legacy_test/test_full_like_op.py (2 changes: 1 addition & 1 deletion)

@@ -209,7 +209,7 @@ def init_data(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestFullLikeBF16Op(TestFullLikeOp1):
     def init_data(self):

test/legacy_test/test_fused_bias_act_op.py (14 changes: 7 additions & 7 deletions)

@@ -388,7 +388,7 @@ def compute_baseline_output(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestFusedBiasActOpBF16(unittest.TestCase):
     def setUp(self):

@@ -443,7 +443,7 @@ def test_check_output(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestWithComTypeBF16(unittest.TestCase):
     def init_test_case(self):

@@ -454,7 +454,7 @@ def init_test_case(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestGegluBF16(TestFusedBiasActOpBF16):
     def init_test_case(self):

@@ -473,7 +473,7 @@ def compute_baseline_output(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16 ",
+    "core is not compiled with CUDA and not support the bfloat16 ",
 )
 class TestSwigluBF16(TestFusedBiasActOpBF16):
     def init_test_case(self):

@@ -492,7 +492,7 @@ def compute_baseline_output(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestQuantBF16(TestFusedBiasActOpBF16):
     def init_test_case(self):

@@ -562,7 +562,7 @@ def compute_paddle_output(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestQuantGegluBF16(TestQuantBF16):
     def init_test_case(self):

@@ -602,7 +602,7 @@ def compute_baseline_output(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not complied with CUDA and not support the bfloat16",
+    "core is not compiled with CUDA and not support the bfloat16",
 )
 class TestQuantSwigluBF16(TestQuantBF16):
     def init_test_case(self):