diff --git a/pom.xml b/pom.xml
index fc9832fe5e1..30402690abf 100644
--- a/pom.xml
+++ b/pom.xml
@@ -363,6 +363,11 @@
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <version>3.0.0</version>
+      </plugin>
diff --git a/sql-plugin/pom.xml b/sql-plugin/pom.xml
index 82b11022161..4a913cad0ac 100644
--- a/sql-plugin/pom.xml
+++ b/sql-plugin/pom.xml
@@ -104,15 +104,6 @@
-
-      <plugin>
-        <groupId>org.scalatest</groupId>
-        <artifactId>scalatest-maven-plugin</artifactId>
-        <version>2.0.0</version>
-        <configuration>
-          <skipTests>true</skipTests>
-        </configuration>
-      </plugin>
       <plugin>
         <artifactId>maven-antrun-plugin</artifactId>
@@ -162,14 +153,6 @@
-      <plugin>
-        <groupId>org.scalatest</groupId>
-        <artifactId>scalatest-maven-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.scoverage</groupId>
-        <artifactId>scoverage-maven-plugin</artifactId>
-      </plugin>
       <plugin>
         <groupId>org.scalastyle</groupId>
         <artifactId>scalastyle-maven-plugin</artifactId>
diff --git a/sql-plugin/src/main/scala/ai/rapids/spark/GpuDeviceManager.scala b/sql-plugin/src/main/scala/ai/rapids/spark/GpuDeviceManager.scala
index d6ba6d3113d..4b66b59ebbb 100644
--- a/sql-plugin/src/main/scala/ai/rapids/spark/GpuDeviceManager.scala
+++ b/sql-plugin/src/main/scala/ai/rapids/spark/GpuDeviceManager.scala
@@ -188,7 +188,8 @@ object GpuDeviceManager extends Logging {
       s"${initialAllocation / 1024 / 1024.0} MB on gpuId $gpuId")
 
     try {
-      Rmm.initialize(init, logConf, initialAllocation, gpuId)
+      Cuda.setDevice(gpuId)
+      Rmm.initialize(init, logConf, initialAllocation)
       GpuShuffleEnv.initStorage(conf, info)
     } catch {
       case e: Exception => logError("Could not initialize RMM", e)
diff --git a/sql-plugin/src/main/scala/ai/rapids/spark/GpuOrcScan.scala b/sql-plugin/src/main/scala/ai/rapids/spark/GpuOrcScan.scala
index b39d209dffe..121539077db 100644
--- a/sql-plugin/src/main/scala/ai/rapids/spark/GpuOrcScan.scala
+++ b/sql-plugin/src/main/scala/ai/rapids/spark/GpuOrcScan.scala
@@ -642,7 +642,7 @@ class GpuOrcPartitionReader(
         .withCompression(orcReader.getCompressionKind)
         .withFileSystem(fs)
         .withPath(filePath)
-        .withTypeCount(orcReader.getTypes.size)
+        .withTypeCount(org.apache.orc.OrcUtils.getOrcTypes(orcReader.getSchema).size)
         .withZeroCopy(zeroCopy)
         .withMaxDiskRangeChunkLimit(maxDiskRangeChunkLimit)
         .build())
diff --git a/sql-plugin/src/main/scala/ai/rapids/spark/Plugin.scala b/sql-plugin/src/main/scala/ai/rapids/spark/Plugin.scala
index 4d0d79951fc..d2faa399323 100644
--- a/sql-plugin/src/main/scala/ai/rapids/spark/Plugin.scala
+++ b/sql-plugin/src/main/scala/ai/rapids/spark/Plugin.scala
@@ -266,9 +266,8 @@ class SQLPlugin extends SparkPlugin with Logging {
 
 /**
  * Old version of SQLPlugin kept for backwards compatibility
- * @deprecated please use SQLPlugin instead
  */
-@scala.deprecated
+@scala.deprecated("Please use ai.rapids.spark.SQLPlugin instead", since="0.1")
 class RapidsSparkPlugin extends SQLPlugin {
   override def driverPlugin(): DriverPlugin = {
     logWarning(s"The plugin class ${this.getClass.getName} is deprecated please use " +
diff --git a/sql-plugin/src/main/scala/org/apache/spark/sql/rapids/bitwise.scala b/sql-plugin/src/main/scala/org/apache/spark/sql/rapids/bitwise.scala
index c8cc9a3fa78..9d209194e2f 100644
--- a/sql-plugin/src/main/scala/org/apache/spark/sql/rapids/bitwise.scala
+++ b/sql-plugin/src/main/scala/org/apache/spark/sql/rapids/bitwise.scala
@@ -38,6 +38,7 @@ object ShiftHelper extends Arm {
   private def maskForDistance(t: DType): Int = t match {
     case DType.INT32 => 0x1F // 0b11111
     case DType.INT64 => 0x3F // 0b111111
+    case t => throw new IllegalArgumentException(s"$t is not a supported type for java bit shifts")
   }
 
   def fixupDistanceNoClose(t: DType, distance: ColumnVector): ColumnVector = {
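
Note on the GpuDeviceManager hunk: the change reflects the cudf Java API dropping the gpuId parameter from Rmm.initialize, so the target device must now be selected explicitly with Cuda.setDevice before the RMM pool is created. Below is a minimal standalone sketch of the new ordering, assuming the pre-LogConf cudf Java overload Rmm.initialize(allocationMode, enableLogging, poolSize); the device ordinal and pool size are illustrative values, not taken from the patch:

    import ai.rapids.cudf.{Cuda, Rmm, RmmAllocationMode}

    object RmmInitSketch {
      def main(args: Array[String]): Unit = {
        val gpuId = 0                      // illustrative device ordinal
        val poolSize = 512L * 1024 * 1024  // illustrative 512 MB initial pool

        // Select the GPU for the calling thread before initializing RMM:
        // Rmm.initialize no longer takes a device id, so the pool is created
        // on whichever device is current at the time of the call.
        Cuda.setDevice(gpuId)
        Rmm.initialize(RmmAllocationMode.POOL, false, poolSize)
      }
    }

Calling Cuda.setDevice before any allocation keeps the pool, and every later RMM allocation, on the executor's assigned GPU.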