From c2fdaef7376b1ee33d90fca23551a20e53698602 Mon Sep 17 00:00:00 2001
From: Thomas Graves
Date: Wed, 6 Jan 2021 09:49:50 -0600
Subject: [PATCH] Support reading decimal columns from parquet files on
 Databricks

Signed-off-by: Thomas Graves
---
 .../nvidia/spark/rapids/shims/spark301db/Spark301dbShims.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/shims/spark301db/src/main/scala/com/nvidia/spark/rapids/shims/spark301db/Spark301dbShims.scala b/shims/spark301db/src/main/scala/com/nvidia/spark/rapids/shims/spark301db/Spark301dbShims.scala
index 4c5dd8b4625..e3d955c5e40 100644
--- a/shims/spark301db/src/main/scala/com/nvidia/spark/rapids/shims/spark301db/Spark301dbShims.scala
+++ b/shims/spark301db/src/main/scala/com/nvidia/spark/rapids/shims/spark301db/Spark301dbShims.scala
@@ -96,7 +96,7 @@ class Spark301dbShims extends Spark301Shims {
     GpuOverrides.exec[FileSourceScanExec](
       "Reading data from files, often from Hive tables",
       ExecChecks((TypeSig.commonCudfTypes + TypeSig.NULL + TypeSig.STRUCT + TypeSig.MAP +
-        TypeSig.ARRAY).nested(), TypeSig.all),
+        TypeSig.ARRAY + TypeSig.DECIMAL).nested(), TypeSig.all),
      (fsse, conf, p, r) => new SparkPlanMeta[FileSourceScanExec](fsse, conf, p, r) {
        // partition filters and data filters are not run on the GPU
        override val childExprs: Seq[ExprMeta[_]] = Seq.empty