Update to new is_before_spark_311 function name (#2005)
Signed-off-by: Jason Lowe <jlowe@nvidia.com>
jlowe authored Mar 24, 2021
1 parent 892b4c5 · commit 087e1ea
Showing 1 changed file with 5 additions and 5 deletions.
10 changes: 5 additions & 5 deletions in integration_tests/src/main/python/hash_aggregate_test.py
@@ -264,7 +264,7 @@ def test_hash_multiple_mode_query_avg_distincts(data_gen, conf):
 @pytest.mark.parametrize('data_gen', _init_list_no_nans, ids=idfn)
 @pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
 @pytest.mark.parametrize('parameterless', ['true', pytest.param('false', marks=pytest.mark.xfail(
-    condition=not is_before_spark_310(), reason="parameterless count not supported by default in Spark 3.1+"))])
+    condition=not is_before_spark_311(), reason="parameterless count not supported by default in Spark 3.1+"))])
 def test_hash_query_multiple_distincts_with_non_distinct(data_gen, conf, parameterless):
     conf.update({'spark.sql.legacy.allowParameterlessCount': parameterless})
     assert_gpu_and_cpu_are_equal_sql(
@@ -290,7 +290,7 @@ def test_hash_query_multiple_distincts_with_non_distinct(data_gen, conf, parameterless):
 @pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs),
     ids=idfn)
 @pytest.mark.parametrize('parameterless', ['true', pytest.param('false', marks=pytest.mark.xfail(
-    condition=not is_before_spark_310(), reason="parameterless count not supported by default in Spark 3.1+"))])
+    condition=not is_before_spark_311(), reason="parameterless count not supported by default in Spark 3.1+"))])
 def test_hash_query_max_with_multiple_distincts(data_gen, conf, parameterless):
     conf.update({'spark.sql.legacy.allowParameterlessCount': parameterless})
     assert_gpu_and_cpu_are_equal_sql(
@@ -344,7 +344,7 @@ def test_hash_query_max_bug(data_gen):
 @pytest.mark.parametrize('data_gen', [_grpkey_floats_with_nan_zero_grouping_keys,
                                       _grpkey_doubles_with_nan_zero_grouping_keys], ids=idfn)
 @pytest.mark.parametrize('parameterless', ['true', pytest.param('false', marks=pytest.mark.xfail(
-    condition=not is_before_spark_310(), reason="parameterless count not supported by default in Spark 3.1+"))])
+    condition=not is_before_spark_311(), reason="parameterless count not supported by default in Spark 3.1+"))])
 def test_hash_agg_with_nan_keys(data_gen, parameterless):
     _no_nans_float_conf.update({'spark.sql.legacy.allowParameterlessCount': parameterless})
     assert_gpu_and_cpu_are_equal_sql(
@@ -392,7 +392,7 @@ def test_count_distinct_with_nan_floats(data_gen):
 
 @pytest.mark.parametrize('data_gen', non_nan_all_basic_gens, ids=idfn)
 @pytest.mark.parametrize('parameterless', ['true', pytest.param('false', marks=pytest.mark.xfail(
-    condition=not is_before_spark_310(), reason="parameterless count not supported by default in Spark 3.1+"))])
+    condition=not is_before_spark_311(), reason="parameterless count not supported by default in Spark 3.1+"))])
 def test_generic_reductions(data_gen, parameterless):
     _no_nans_float_conf.update({'spark.sql.legacy.allowParameterlessCount': parameterless})
     assert_gpu_and_cpu_are_equal_collect(
@@ -411,7 +411,7 @@ def test_generic_reductions(data_gen, parameterless):
 
 @pytest.mark.parametrize('data_gen', non_nan_all_basic_gens, ids=idfn)
 @pytest.mark.parametrize('parameterless', ['true', pytest.param('false', marks=pytest.mark.xfail(
-    condition=not is_before_spark_310(), reason="parameterless count not supported by default in Spark 3.1+"))])
+    condition=not is_before_spark_311(), reason="parameterless count not supported by default in Spark 3.1+"))])
 def test_count(data_gen, parameterless):
     assert_gpu_and_cpu_are_equal_collect(
         lambda spark : unary_op_df(spark, data_gen) \
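For readers outside the repository: the xfail marks above gate on a Spark version check, and this commit only tracks the helper's rename from is_before_spark_310 to is_before_spark_311. Below is a minimal sketch of what such a version gate might look like; the helper names, the session lookup, and the exact comparison are assumptions for illustration, not code taken from this commit.

# Hypothetical sketch of a Spark version gate like the one referenced above.
# Names and the comparison below are assumptions, not the plugin's actual helper.
from pyspark.sql import SparkSession

def _spark_version():
    # Version string of the active Spark session, e.g. "3.1.1" or "3.0.2".
    return SparkSession.builder.getOrCreate().version

def is_before_spark_311():
    # True when the running Spark release is older than 3.1.1.
    parts = _spark_version().split('-')[0].split('.')
    return tuple(int(p) for p in parts[:3]) < (3, 1, 1)

With a gate of this shape, the xfail condition not is_before_spark_311() marks the parameterless='false' case as an expected failure only on Spark 3.1.1 and newer, where parameterless count is rejected by default.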
