rearranged imports
razajafri committed Dec 4, 2020
1 parent baae219 commit a4835aa
Showing 1 changed file with 6 additions and 3 deletions.
@@ -21,16 +21,18 @@ import java.lang.reflect.Method
 import java.nio.charset.StandardCharsets
 
 import scala.collection.mutable
+
 import ai.rapids.cudf.{ColumnVector, DType, Table, TableWriter}
 import com.nvidia.spark.rapids.shims.spark310.{ParquetCachedBatchSerializer, ParquetOutputFileFormat}
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.Path
 import org.apache.hadoop.mapreduce.{RecordWriter, TaskAttemptContext}
 import org.apache.parquet.hadoop.ParquetFileReader
-import org.apache.spark.{SparkConf, SparkException}
 import org.mockito.ArgumentMatchers._
 import org.mockito.Mockito._
 import org.mockito.invocation.InvocationOnMock
+
+import org.apache.spark.{SparkConf, SparkException}
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.AttributeReference
 import org.apache.spark.sql.internal.SQLConf
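
The rearrangement matches the import-ordering convention used across Apache Spark and adopted by spark-rapids: java/javax imports first, then scala, then third-party libraries, then org.apache.spark in a group of its own, with blank lines between groups. A minimal sketch of that grouping, assuming this convention is what the commit is applying:

import java.lang.reflect.Method              // 1. java / javax

import scala.collection.mutable              // 2. scala

import ai.rapids.cudf.Table                  // 3. third-party (cudf, hadoop, mockito, ...)
import org.mockito.Mockito._

import org.apache.spark.SparkConf            // 4. org.apache.spark last, in its own group
import org.apache.spark.sql.internal.SQLConf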
@@ -265,8 +267,9 @@ class ParquetWriterSuite extends SparkQueryCompareTestSuite {
   private var compressWithParquetMethod: Option[Method] = None
   private var ref: Option[Any] = None
 
-  private def testCompressColBatch(cudfCols: Array[ColumnVector],
-      gpuCols: Array[org.apache.spark.sql.vectorized.ColumnVector]): Unit = {
+  private def testCompressColBatch(
+      cudfCols: Array[ColumnVector],
+      gpuCols: Array[org.apache.spark.sql.vectorized.ColumnVector]): Unit = {
     if (!withCpuSparkSession(s => s.version < "3.1.0")) {
 
       // mock static method for Table
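
The hunk ends just before the setup that the "mock static method for Table" comment introduces; that part of the file is collapsed in this view. For reference, with Mockito 3.4+ (mockito-inline) stubbing a static method generally follows the pattern in this minimal Scala sketch; java.util.UUID is a stand-in target here, since the real test body is not shown:

import java.util.UUID

import org.mockito.Mockito

val fixed = UUID.fromString("00000000-0000-0000-0000-000000000000")
val mocked = Mockito.mockStatic(classOf[UUID])
try {
  // Stub the static call; the explicit type parameter helps Scala's inference.
  mocked.when[UUID](() => UUID.randomUUID()).thenReturn(fixed)
  assert(UUID.randomUUID() == fixed) // stub is active on this thread until closed
} finally {
  mocked.close() // restore the real static behavior
}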
