Avoid serializing plan in GpuCoalesceBatches, GpuHashAggregateExec, and GpuTopN #5886

Merged 1 commit on Jun 23, 2022.
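
All three changes apply the same Spark rule of thumb: any field of the plan node that a mapPartitions closure reads is compiled to a read through this, so the whole plan node, and transitively its child plan, is serialized into every task closure. Copying the field into a local val before the closure means only that value is captured. Below is a minimal, standalone sketch of the problem and the fix; PayloadHolder, takeCapturingThis, and takeWithLocal are illustrative names, not part of the plugin.

import org.apache.spark.sql.SparkSession

// Stand-in for a plan node: one small field the executors need (limit) and one
// large field they do not (payload), which plays the role of the child plan.
class PayloadHolder(val limit: Int) extends Serializable {
  val payload: Array[Byte] = new Array[Byte](16 * 1024 * 1024)

  // `limit` here is really `this.limit`, so `this` (payload included) is
  // captured by the closure and shipped with every task.
  def takeCapturingThis(spark: SparkSession): Array[Int] =
    spark.sparkContext.parallelize(1 to 1000, 4)
      .mapPartitions(iter => iter.take(limit))
      .collect()

  // Copying the field to a local val first means only the Int is captured.
  def takeWithLocal(spark: SparkSession): Array[Int] = {
    val localLimit = limit
    spark.sparkContext.parallelize(1 to 1000, 4)
      .mapPartitions(iter => iter.take(localLimit))
      .collect()
  }
}

The locals introduced in the diffs below (localCodecConfigs, inputAttrs, targetBatchSize, localLimit, localProjectList, childOutput) are this trick applied to GpuCoalesceBatches, GpuHashAggregateExec, and GpuTopN.
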
@@ -705,6 +705,7 @@ case class GpuCoalesceBatches(child: SparkPlan, goal: CoalesceGoal)
 val decompressMemoryTarget = maxDecompressBatchMemory

 val batches = child.executeColumnar()
+val localCodecConfigs = codecConfigs
 if (outputSchema.isEmpty) {
 batches.mapPartitions { iter =>
 val numRows = iter.map(_.numRows).sum
@@ -720,7 +721,7 @@ case class GpuCoalesceBatches(child: SparkPlan, goal: CoalesceGoal)
 iter, dataTypes, sizeGoal, decompressMemoryTarget,
 numInputRows, numInputBatches, numOutputRows, numOutputBatches, NoopMetric,
 concatTime, opTime, peakDevMemory, callback, "GpuCoalesceBatches",
-codecConfigs)
+localCodecConfigs)
 }
 case batchingGoal: BatchedByKey =>
 val targetSize = RapidsConf.GPU_BATCH_SIZE_BYTES.get(conf)
@@ -1438,25 +1438,27 @@ case class GpuHashAggregateExec(
 makeSpillCallback(allMetrics))

 // cache in a local variable to avoid serializing the full child plan
+val inputAttrs = inputAttributes
 val groupingExprs = groupingExpressions
 val aggregateExprs = aggregateExpressions
 val aggregateAttrs = aggregateAttributes
 val resultExprs = resultExpressions
 val modeInfo = AggregateModeInfo(uniqueModes)
+val targetBatchSize = configuredTargetBatchSize

 val rdd = child.executeColumnar()

 rdd.mapPartitions { cbIter =>
 new GpuHashAggregateIterator(
 cbIter,
-inputAttributes,
+inputAttrs,
 groupingExprs,
 aggregateExprs,
 aggregateAttrs,
 resultExprs,
 modeInfo,
 aggMetrics,
-configuredTargetBatchSize)
+targetBatchSize)
 }
 }

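
The comment already present in this hunk ("cache in a local variable to avoid serializing the full child plan") names the technique; the change just extends it to the two fields that were still read inside the closure, inputAttributes and configuredTargetBatchSize. Since Spark serializes task closures with plain Java serialization, the effect can be observed without a cluster by measuring a closure's serialized size. A sketch under that assumption follows; ClosureSizeDemo, Node, and serializedSize are made-up names, not plugin or Spark APIs.

import java.io.{ByteArrayOutputStream, ObjectOutputStream}

object ClosureSizeDemo {
  // Java-serializes an object and reports the byte count, the same mechanism
  // Spark applies to task closures.
  def serializedSize(obj: AnyRef): Int = {
    val bytes = new ByteArrayOutputStream()
    val out = new ObjectOutputStream(bytes)
    try out.writeObject(obj) finally out.close()
    bytes.size()
  }

  // A small stand-in for a plan node carrying bulky state.
  class Node(val batchSize: Long) extends Serializable {
    val bulky: Array[Byte] = new Array[Byte](1 << 20)

    // Reads `this.batchSize`, so `this` (bulky included) rides along.
    def closureCapturingThis: Iterator[Long] => Iterator[Long] =
      iter => iter.map(math.min(_, batchSize))

    // Copies the field first, so only the Long value is captured.
    def closureWithLocal: Iterator[Long] => Iterator[Long] = {
      val localBatchSize = batchSize
      iter => iter.map(math.min(_, localBatchSize))
    }
  }

  def main(args: Array[String]): Unit = {
    val node = new Node(1024L)
    // The first size includes the 1 MB array; the second carries only the captured Long.
    println(s"capturing this: ${serializedSize(node.closureCapturingThis)} bytes")
    println(s"local copy:     ${serializedSize(node.closureWithLocal)} bytes")
  }
}
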
sql-plugin/src/main/scala/com/nvidia/spark/rapids/limit.scala (6 additions, 2 deletions)
@@ -296,10 +296,14 @@ case class GpuTopN(
 val sortTime = gpuLongMetric(SORT_TIME)
 val concatTime = gpuLongMetric(CONCAT_TIME)
 val callback = GpuMetric.makeSpillCallback(allMetrics)
+val localLimit = limit
+val localProjectList = projectList
+val childOutput = child.output
+
 child.executeColumnar().mapPartitions { iter =>
-val topN = GpuTopN(limit, sorter, iter, opTime, sortTime, concatTime,
+val topN = GpuTopN(localLimit, sorter, iter, opTime, sortTime, concatTime,
 inputBatches, inputRows, outputBatches, outputRows, callback)
-if (projectList != child.output) {
+if (localProjectList != childOutput) {
 topN.map { batch =>
 GpuProjectExec.projectAndClose(batch, boundProjectExprs, opTime)
 }
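
In the GpuTopN hunk, childOutput is the local worth calling out: reading child.output inside the closure would capture the whole node and, through its child field, the entire child plan, whereas the cached value is only the output attribute list. limit and projectList are constructor fields, so they are reachable only through this and get the same treatment. A compact illustration, with Wrapper and Child as made-up stand-ins rather than plugin classes:

// Illustrative only: Child stands in for a child SparkPlan, Wrapper for a node
// like GpuTopN that needs the child's output attributes inside a closure.
class Child extends Serializable {
  val buffers: Array[Byte] = new Array[Byte](8 * 1024 * 1024) // heavy state
  val output: Seq[String] = Seq("a", "b", "c")                // tiny metadata
}

class Wrapper(val child: Child) extends Serializable {
  // Reads `this.child`, so `this` and the heavy Child instance are captured.
  def closureViaChild: String => Boolean = name => child.output.contains(name)

  // Caches the small Seq first; the closure captures only that.
  def closureViaLocal: String => Boolean = {
    val childOutput = child.output
    name => childOutput.contains(name)
  }
}

The comparison localProjectList != childOutput in the new code performs the same projection-needed check as before, just against the cached values instead of the plan fields.
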