Update to new is_before_spark_311 function name #2005

Merged · 1 commit · Mar 24, 2021
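This PR is a mechanical rename: every xfail condition in hash_aggregate_test.py that called is_before_spark_310() now calls is_before_spark_311(), matching the Spark release (3.1.1) in which parameterless COUNT stopped being allowed by default. For context, a minimal sketch of what such a version helper could look like; only the helper names appear in this diff, so the bodies below are illustrative assumptions, not the spark-rapids implementation:

```python
# Hypothetical sketch of the version helper being renamed. Only the names
# is_before_spark_310/is_before_spark_311 appear in this PR; the bodies
# below are illustrative assumptions, not the real spark-rapids code.

def spark_version():
    # In the real integration tests the version comes from the active
    # SparkSession; hard-coded here so the sketch runs stand-alone.
    return "3.1.1"

def _version_tuple(version):
    # "3.1.1" -> (3, 1, 1), so comparisons are numeric, not lexicographic.
    return tuple(int(part) for part in version.split(".")[:3])

def is_before_spark_311():
    # Renamed from is_before_spark_310: the behavior under test changed in
    # Spark 3.1.1, so the helper name now points at that exact release.
    return _version_tuple(spark_version()) < (3, 1, 1)
```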
integration_tests/src/main/python/hash_aggregate_test.py (10 changes: 5 additions & 5 deletions)
```diff
@@ -264,7 +264,7 @@ def test_hash_multiple_mode_query_avg_distincts(data_gen, conf):
 @pytest.mark.parametrize('data_gen', _init_list_no_nans, ids=idfn)
 @pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
 @pytest.mark.parametrize('parameterless', ['true', pytest.param('false', marks=pytest.mark.xfail(
-    condition=not is_before_spark_310(), reason="parameterless count not supported by default in Spark 3.1+"))])
+    condition=not is_before_spark_311(), reason="parameterless count not supported by default in Spark 3.1+"))])
 def test_hash_query_multiple_distincts_with_non_distinct(data_gen, conf, parameterless):
     conf.update({'spark.sql.legacy.allowParameterlessCount': parameterless})
     assert_gpu_and_cpu_are_equal_sql(
@@ -290,7 +290,7 @@ def test_hash_query_multiple_distincts_with_non_distinct(data_gen, conf, parameterless):
 @pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs),
                          ids=idfn)
 @pytest.mark.parametrize('parameterless', ['true', pytest.param('false', marks=pytest.mark.xfail(
-    condition=not is_before_spark_310(), reason="parameterless count not supported by default in Spark 3.1+"))])
+    condition=not is_before_spark_311(), reason="parameterless count not supported by default in Spark 3.1+"))])
 def test_hash_query_max_with_multiple_distincts(data_gen, conf, parameterless):
     conf.update({'spark.sql.legacy.allowParameterlessCount': parameterless})
     assert_gpu_and_cpu_are_equal_sql(
@@ -344,7 +344,7 @@ def test_hash_query_max_bug(data_gen):
 @pytest.mark.parametrize('data_gen', [_grpkey_floats_with_nan_zero_grouping_keys,
                                       _grpkey_doubles_with_nan_zero_grouping_keys], ids=idfn)
 @pytest.mark.parametrize('parameterless', ['true', pytest.param('false', marks=pytest.mark.xfail(
-    condition=not is_before_spark_310(), reason="parameterless count not supported by default in Spark 3.1+"))])
+    condition=not is_before_spark_311(), reason="parameterless count not supported by default in Spark 3.1+"))])
 def test_hash_agg_with_nan_keys(data_gen, parameterless):
     _no_nans_float_conf.update({'spark.sql.legacy.allowParameterlessCount': parameterless})
     assert_gpu_and_cpu_are_equal_sql(
@@ -392,7 +392,7 @@ def test_count_distinct_with_nan_floats(data_gen):
 
 @pytest.mark.parametrize('data_gen', non_nan_all_basic_gens, ids=idfn)
 @pytest.mark.parametrize('parameterless', ['true', pytest.param('false', marks=pytest.mark.xfail(
-    condition=not is_before_spark_310(), reason="parameterless count not supported by default in Spark 3.1+"))])
+    condition=not is_before_spark_311(), reason="parameterless count not supported by default in Spark 3.1+"))])
 def test_generic_reductions(data_gen, parameterless):
     _no_nans_float_conf.update({'spark.sql.legacy.allowParameterlessCount': parameterless})
     assert_gpu_and_cpu_are_equal_collect(
@@ -411,7 +411,7 @@ def test_generic_reductions(data_gen, parameterless):
 
 @pytest.mark.parametrize('data_gen', non_nan_all_basic_gens, ids=idfn)
 @pytest.mark.parametrize('parameterless', ['true', pytest.param('false', marks=pytest.mark.xfail(
-    condition=not is_before_spark_310(), reason="parameterless count not supported by default in Spark 3.1+"))])
+    condition=not is_before_spark_311(), reason="parameterless count not supported by default in Spark 3.1+"))])
 def test_count(data_gen, parameterless):
     assert_gpu_and_cpu_are_equal_collect(
         lambda spark : unary_op_df(spark, data_gen) \
```
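Each hunk applies the same pattern: parametrize the legacy spark.sql.legacy.allowParameterlessCount flag, and mark the 'false' case xfail only when the running Spark is 3.1.1 or newer. A stripped-down, self-contained sketch of that pattern follows; the test body is a placeholder, since the real tests use the spark-rapids assert_gpu_and_cpu_are_equal_* helpers, which are not reproduced here:

```python
import pytest

# Assumes the is_before_spark_311() sketch above is in scope; the test
# body is a placeholder standing in for the spark-rapids comparison helpers.
@pytest.mark.parametrize('parameterless', [
    'true',
    pytest.param('false', marks=pytest.mark.xfail(
        # Expect failure only when actually running on Spark 3.1.1+.
        condition=not is_before_spark_311(),
        reason="parameterless count not supported by default in Spark 3.1+"))])
def test_parameterless_count_flag(parameterless):
    conf = {'spark.sql.legacy.allowParameterlessCount': parameterless}
    # Placeholder assertion; the real tests run a query on CPU and GPU
    # with this conf and compare the results.
    assert conf['spark.sql.legacy.allowParameterlessCount'] == parameterless
```

Since the xfail condition is evaluated at collection time, on Spark 3.0.x both parametrizations are simply expected to pass, and the rename only changes which release boundary the condition checks.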