Commit 66465ee
disable some tests that are not fallback tests and fail due to deletion vector metrics missing
andygrove committed Jun 30, 2023
1 parent 023d152 commit 66465ee
Showing 1 changed file with 3 additions and 0 deletions.
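
The fix follows a common pattern in this suite: gate each affected test with a pytest.mark.skipif whose condition is a small environment probe. The repository's real is_databricks122_or_later() helper is not shown in this diff; the sketch below is only an assumption of how such a probe can be written, based on Databricks clusters exporting a DATABRICKS_RUNTIME_VERSION environment variable (the parsing and helper bodies here are illustrative, not copied from spark-rapids):

import os

def is_databricks_runtime():
    # Assumption: Databricks clusters set DATABRICKS_RUNTIME_VERSION (e.g. "12.2").
    return os.environ.get('DATABRICKS_RUNTIME_VERSION') is not None

def is_databricks122_or_later():
    # Hypothetical implementation: parse "major.minor" and compare as a tuple.
    version = os.environ.get('DATABRICKS_RUNTIME_VERSION')
    if version is None:
        return False
    major, minor = (int(p) for p in version.split('.')[:2])
    return (major, minor) >= (12, 2)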
3 changes: 3 additions & 0 deletions integration_tests/src/main/python/delta_lake_update_test.py
@@ -92,6 +92,7 @@ def write_func(spark, path):
 @pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
 @pytest.mark.parametrize("partition_columns", [None, ["a"]], ids=idfn)
 @pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
+@pytest.mark.skipif(is_databricks122_or_later(), reason="https://github.com/NVIDIA/spark-rapids/issues/8423")
 def test_delta_update_entire_table(spark_tmp_path, use_cdf, partition_columns):
     def generate_dest_data(spark):
         return three_col_df(spark,
@@ -124,6 +125,7 @@ def generate_dest_data(spark):
 @pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
 @pytest.mark.parametrize("partition_columns", [None, ["a"]], ids=idfn)
 @pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
+@pytest.mark.skipif(is_databricks122_or_later(), reason="https://github.com/NVIDIA/spark-rapids/issues/8423")
 def test_delta_update_rows(spark_tmp_path, use_cdf, partition_columns):
     # Databricks changes the number of files being written, so we cannot compare logs unless there's only one slice
     num_slices_to_test = 1 if is_databricks_runtime() else 10
@@ -142,6 +144,7 @@ def generate_dest_data(spark):
 @pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
 @pytest.mark.parametrize("partition_columns", [None, ["a"]], ids=idfn)
 @pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
+@pytest.mark.skipif(is_databricks122_or_later(), reason="https://github.com/NVIDIA/spark-rapids/issues/8423")
 def test_delta_update_dataframe_api(spark_tmp_path, use_cdf, partition_columns):
     from delta.tables import DeltaTable
     data_path = spark_tmp_path + "/DELTA_DATA"
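
Because pytest evaluates a skipif condition at collection time, each of the three tests above is reported as skipped on Databricks 12.2+ with the tracking-issue URL as the reason, and runs normally everywhere else. A minimal, self-contained illustration of the same pattern (the on_databricks() predicate is a stand-in, not the suite's actual helper):

import os
import pytest

def on_databricks():
    # Stand-in probe; the real tests use is_databricks122_or_later().
    return os.environ.get('DATABRICKS_RUNTIME_VERSION') is not None

# The condition is evaluated once, when the module is collected.
@pytest.mark.skipif(on_databricks(), reason="https://github.com/NVIDIA/spark-rapids/issues/8423")
def test_example():
    assert 1 + 1 == 2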
