Commit

Removed the redundant test for element_at and fixed the failing one [databricks] (#4502)

* removed the redundant test for element_at and fixed the failing one

Signed-off-by: Raza Jafri <rjafri@nvidia.com>

* reduce databricks parallelism

Signed-off-by: Raza Jafri <rjafri@nvidia.com>

* Revert "reduce databricks parallelism"

This reverts commit 15c8738.

Signed-off-by: Raza Jafri <rjafri@nvidia.com>

Co-authored-by: Raza Jafri <rjafri@nvidia.com>
razajafri authored Jan 13, 2022
1 parent eab40b0 · commit b64e316
Showing 1 changed file with 2 additions and 11 deletions.
integration_tests/src/main/python/array_test.py
@@ -132,16 +132,6 @@ def test_get_array_item_ansi_fail(data_gen):
                       'spark.sql.legacy.allowNegativeScaleOfDecimal': True},
                 error_message=message)
 
-@pytest.mark.skipif(is_before_spark_311(), reason="Only in Spark 3.1.1 + ANSI mode, array index throws on out of range indexes")
-@pytest.mark.parametrize('data_gen', array_gens_sample, ids=idfn)
-def test_element_at_index_ansi_fail(data_gen):
-    message = "org.apache.spark.SparkArrayIndexOutOfBoundsException" if not is_before_spark_330() else "java.lang.ArrayIndexOutOfBoundsException"
-    assert_gpu_and_cpu_error(lambda spark: unary_op_df(
-        spark, data_gen).select(element_at(col('a'), 100)).collect(),
-                conf={'spark.sql.ansi.enabled':True,
-                      'spark.sql.legacy.allowNegativeScaleOfDecimal': True},
-                error_message=message)
-
 @pytest.mark.skipif(not is_before_spark_311(), reason="For Spark before 3.1.1 + ANSI mode, null will be returned instead of an exception if index is out of range")
 @pytest.mark.parametrize('data_gen', array_gens_sample, ids=idfn)
 def test_get_array_item_ansi_not_fail(data_gen):
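
Editorial note, not part of the commit: the test deleted above was redundant because it drove exactly the same out-of-range lookup as test_array_element_at_ansi_fail, which the next hunk fixes. A minimal sketch of the shared query; spark, data_gen, and unary_op_df come from the repo's test framework and are assumed here:

    # The expression both tests built: element_at with an index (100) past
    # the end of every generated array. unary_op_df is the repo's helper
    # that builds a single-column DataFrame 'a' of data_gen values.
    from pyspark.sql.functions import col, element_at
    unary_op_df(spark, data_gen).select(element_at(col('a'), 100)).collect()
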
@@ -161,11 +151,12 @@ def test_array_element_at(data_gen):
 @pytest.mark.skipif(is_before_spark_311(), reason="Only in Spark 3.1.1 + ANSI mode, array index throws on out of range indexes")
 @pytest.mark.parametrize('data_gen', array_gens_sample, ids=idfn)
 def test_array_element_at_ansi_fail(data_gen):
+    message = "org.apache.spark.SparkArrayIndexOutOfBoundsException" if not is_before_spark_330() else "java.lang.ArrayIndexOutOfBoundsException"
     assert_gpu_and_cpu_error(lambda spark: unary_op_df(
         spark, data_gen).select(element_at(col('a'), 100)).collect(),
                 conf={'spark.sql.ansi.enabled':True,
                       'spark.sql.legacy.allowNegativeScaleOfDecimal': True},
-                error_message='java.lang.ArrayIndexOutOfBoundsException')
+                error_message=message)
 
 @pytest.mark.skipif(not is_before_spark_311(), reason="For Spark before 3.1.1 + ANSI mode, null will be returned instead of an exception if index is out of range")
 @pytest.mark.parametrize('data_gen', array_gens_sample, ids=idfn)
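
The fix itself is the version gate added in the hunk above: per the diff, Spark 3.3.0 reports the ANSI out-of-range error as org.apache.spark.SparkArrayIndexOutOfBoundsException, while earlier versions surface java.lang.ArrayIndexOutOfBoundsException. A standalone sketch of that selection, using an illustrative version-tuple argument in place of the repo's is_before_spark_330() helper:

    # Pick the exception class name a test should expect for a given Spark version.
    def expected_index_error(spark_version):
        if spark_version < (3, 3, 0):
            return "java.lang.ArrayIndexOutOfBoundsException"
        return "org.apache.spark.SparkArrayIndexOutOfBoundsException"

    assert expected_index_error((3, 1, 1)) == "java.lang.ArrayIndexOutOfBoundsException"
    assert expected_index_error((3, 3, 0)) == "org.apache.spark.SparkArrayIndexOutOfBoundsException"
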