Profiling tool support for collection and analysis (#2590)
* Qualification tool

Signed-off-by: Thomas Graves <tgraves@apache.org>

* remove unused func

* Add missing files

* Add checks for format option

* cast columns to string to write to text

* Revert "Add checks for format option"

This reverts commit 6f5271c.

* cleanup

Signed-off-by: Thomas Graves <tgraves@nvidia.com>

* update output dir

* formatting

* Update help messages

* update app name

* cleanup

* put test functions back

* fix typo

* add printSQLPlanMetrics and printRapidsJar

* use opt

* Add Analysis

* format output

* more tests

Signed-off-by: Thomas Graves <tgraves@apache.org>

* tests working

* test rearrange utils

* move test file

* move test file right location

* add Analysis Suite

* update test analysis

* add rapids-4-spark-tools/src/test/resources/ProfilingExpectations/rapids_join_eventlog_jobandstagemetrics_expectation.csv

* add more tests

* more tests

Signed-off-by: Thomas Graves <tgraves@nvidia.com>

* remove unneeded expectation file

* Add more analysis tests

* comment

* cleanup

* fix logging include
tgravescs authored Jun 4, 2021
1 parent 72fbf97 commit e65e826
Showing 17 changed files with 1,282 additions and 69 deletions.
Analysis.scala
@@ -35,9 +35,10 @@ class Analysis(apps: ArrayBuffer[ApplicationInfo], fileWriter: Option[FileWriter
 
   // Job Level TaskMetrics Aggregation
   def jobMetricsAggregation(): Unit = {
+    val messageHeader = "\nJob level aggregated task metrics:\n"
     if (apps.size == 1) {
-      fileWriter.foreach(_.write("Job level aggregated task metrics:"))
-      apps.head.runQuery(apps.head.jobMetricsAggregationSQL + " order by Duration desc")
+      apps.head.runQuery(apps.head.jobMetricsAggregationSQL + " order by Duration desc",
+        false, fileWriter, messageHeader)
     } else {
       var query = ""
       for (app <- apps) {
@@ -47,16 +48,17 @@ class Analysis(apps: ArrayBuffer[ApplicationInfo], fileWriter: Option[FileWriter
           query += " union " + app.jobMetricsAggregationSQL
         }
       }
-      fileWriter.foreach(_.write("Job level aggregated task metrics:"))
-      apps.head.runQuery(query + " order by appIndex, Duration desc")
+      apps.head.runQuery(query + " order by appIndex, Duration desc",
+        false, fileWriter, messageHeader)
     }
   }
 
   // Stage Level TaskMetrics Aggregation
   def stageMetricsAggregation(): Unit = {
+    val messageHeader = "\nStage level aggregated task metrics:\n"
     if (apps.size == 1) {
-      fileWriter.foreach(_.write("Stage level aggregated task metrics:"))
-      apps.head.runQuery(apps.head.stageMetricsAggregationSQL + " order by Duration desc")
+      apps.head.runQuery(apps.head.stageMetricsAggregationSQL + " order by Duration desc",
+        false, fileWriter, messageHeader)
     } else {
       var query = ""
       for (app <- apps) {
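Editor's note: each multi-app branch in this class builds one combined query by concatenating every application's aggregation SQL with " union " and sorting the result once by appIndex. A minimal sketch of that assembly, where the two SELECTs are hypothetical stand-ins for the generated per-app SQL, not the tool's real queries:

// Sketch only: stand-ins for each app's aggregation SQL.
val perAppSql = Seq(
  "select 1 as appIndex, 'job_0' as ID, 100 as Duration",
  "select 2 as appIndex, 'job_0' as ID, 200 as Duration")
val query = perAppSql.mkString(" union ") + " order by appIndex, Duration desc"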
@@ -66,16 +68,17 @@ class Analysis(apps: ArrayBuffer[ApplicationInfo], fileWriter: Option[FileWriter
           query += " union " + app.stageMetricsAggregationSQL
         }
       }
-      fileWriter.foreach(_.write("Stage level aggregated task metrics:"))
-      apps.head.runQuery(query + " order by appIndex, Duration desc")
+      apps.head.runQuery(query + " order by appIndex, Duration desc",
+        false, fileWriter, messageHeader)
     }
   }
 
   // Job + Stage Level TaskMetrics Aggregation
-  def jobAndStageMetricsAggregation(): Unit = {
+  def jobAndStageMetricsAggregation(): DataFrame = {
+    val messageHeader = "\nJob + Stage level aggregated task metrics:\n"
     if (apps.size == 1) {
-      val messageHeader = "Job + Stage level aggregated task metrics:"
-      apps.head.runQuery(apps.head.jobAndStageMetricsAggregationSQL + " order by Duration desc")
+      apps.head.runQuery(apps.head.jobAndStageMetricsAggregationSQL + " order by Duration desc",
+        false, fileWriter, messageHeader)
     } else {
       var query = ""
       for (app <- apps) {
@@ -85,17 +88,18 @@ class Analysis(apps: ArrayBuffer[ApplicationInfo], fileWriter: Option[FileWriter
           query += " union " + app.jobAndStageMetricsAggregationSQL
         }
       }
-      fileWriter.foreach(_.write("Job + Stage level aggregated task metrics:"))
-      apps.head.runQuery(query + " order by appIndex, Duration desc")
+      apps.head.runQuery(query + " order by appIndex, Duration desc",
+        false, fileWriter, messageHeader)
     }
   }
 
   // SQL Level TaskMetrics Aggregation (only when SQL exists)
   def sqlMetricsAggregation(): DataFrame = {
+    val messageHeader = "\nSQL level aggregated task metrics:\n"
     if (apps.size == 1) {
       if (apps.head.allDataFrames.contains(s"sqlDF_${apps.head.index}")) {
-        val messageHeader = "SQL level aggregated task metrics:"
-        apps.head.runQuery(apps.head.sqlMetricsAggregationSQL + " order by Duration desc")
+        apps.head.runQuery(apps.head.sqlMetricsAggregationSQL + " order by Duration desc",
+          false, fileWriter, messageHeader)
       } else {
         apps.head.sparkSession.emptyDataFrame
       }
@@ -109,8 +113,8 @@ class Analysis(apps: ArrayBuffer[ApplicationInfo], fileWriter: Option[FileWriter
           query += " union " + app.sqlMetricsAggregationSQL
         }
       }
-      val messageHeader = "SQL level aggregated task metrics:"
-      apps.head.runQuery(query + " order by appIndex, Duration desc")
+      apps.head.runQuery(query + " order by appIndex, Duration desc", false,
+        fileWriter, messageHeader)
     }
   }
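Editor's note: every rewritten call above passes `false, fileWriter, messageHeader` to runQuery instead of writing the header by hand, which implies runQuery (defined on ApplicationInfo, outside this diff) gained output-plumbing parameters. Below is a hedged, self-contained sketch of that calling convention; the parameter names, defaults, and return type are assumptions, not the real signature:

import java.io.FileWriter

// Standalone sketch: mirrors, but is not, ApplicationInfo.runQuery. The
// query runner owns header and output plumbing, so callers stop writing
// headers themselves.
object RunQuerySketch {
  def runQuery(
      query: String,
      vertical: Boolean = false,             // assumed meaning of the literal `false`
      fileWriter: Option[FileWriter] = None, // sink for the formatted result
      messageHeader: String = ""): String = {
    val result = s"<rows for: $query>"       // stand-in for the real DataFrame output
    fileWriter.foreach { w =>
      w.write(messageHeader)
      w.write(result + "\n")
    }
    result
  }

  def main(args: Array[String]): Unit = {
    val writer = new FileWriter("profile.log")
    runQuery("select 1", false, Some(writer), "\nJob level aggregated task metrics:\n")
    writer.close()
  }
}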

@@ -122,4 +126,40 @@ class Analysis(apps: ArrayBuffer[ApplicationInfo], fileWriter: Option[FileWriter
        |""".stripMargin
     app.runQuery(customQuery)
   }
-}
+
+  // Function to find out shuffle read skew (for joins or aggregation)
+  def shuffleSkewCheck(): Unit = {
+    for (app <- apps) {
+      shuffleSkewCheckSingleApp(app)
+    }
+  }
+
+  def shuffleSkewCheckSingleApp(app: ApplicationInfo): DataFrame = {
+    val customQuery =
+      s"""with tmp as
+         |(select stageId, stageAttemptId,
+         |avg(sr_totalBytesRead) avgShuffleReadBytes,
+         |avg(duration) avgDuration
+         |from taskDF_${app.index}
+         |group by stageId, stageAttemptId)
+         |select ${app.index} as appIndex, t.stageId, t.stageAttemptId,
+         |t.taskId, t.attempt,
+         |round(t.duration/1000,2) as taskDurationSec,
+         |round(tmp.avgDuration/1000,2) as avgDurationSec,
+         |round(t.sr_totalBytesRead/1024/1024,2) as taskShuffleReadMB,
+         |round(tmp.avgShuffleReadBytes/1024/1024,2) as avgShuffleReadMB,
+         |round(t.peakExecutionMemory/1024/1024,2) as taskPeakMemoryMB,
+         |t.successful,
+         |substr(t.endReason,0,100) endReason_first100char
+         |from tmp, taskDF_${app.index} t
+         |where tmp.stageId=t.stageId
+         |and tmp.stageAttemptId=t.stageAttemptId
+         |and t.sr_totalBytesRead > 3 * tmp.avgShuffleReadBytes
+         |and t.sr_totalBytesRead > 100*1024*1024
+         |order by t.stageId, t.stageAttemptId, t.taskId, t.attempt
+         |""".stripMargin
+    val messageHeader = s"\nShuffle Skew Check:" +
+      " (When task's Shuffle Read Size > 3 * Avg Stage-level size)\n"
+    app.runQuery(customQuery, false, fileWriter, messageHeader)
+  }
+}
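Editor's note: the skew query flags a task only when its shuffle read is both more than 3x its stage attempt's average and larger than 100 MB. For example, a 400 MB task in a stage averaging 120 MB is reported (400 > 3 * 120 and 400 > 100), while a 25 MB task in a 5 MB-average stage is not. The same predicate restated with the DataFrame API, as a hedged sketch; taskDf is assumed to carry the same columns as the tool's taskDF_<index> temp views:

import org.apache.spark.sql.{DataFrame, functions => F}

// Hedged restatement of the SQL above; not the tool's implementation.
def shuffleSkewTasks(taskDf: DataFrame): DataFrame = {
  // Per stage attempt: average shuffle read and duration (the `tmp` CTE).
  val stageAvgs = taskDf
    .groupBy("stageId", "stageAttemptId")
    .agg(
      F.avg("sr_totalBytesRead").as("avgShuffleReadBytes"),
      F.avg("duration").as("avgDuration"))
  taskDf
    .join(stageAvgs, Seq("stageId", "stageAttemptId"))
    .where(F.col("sr_totalBytesRead") > F.col("avgShuffleReadBytes") * 3)
    .where(F.col("sr_totalBytesRead") > F.lit(100L * 1024 * 1024))
    .orderBy("stageId", "stageAttemptId", "taskId", "attempt")
}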
CollectInformation.scala
@@ -21,10 +21,10 @@ import java.util.concurrent.TimeUnit
 import scala.collection.mutable
 import scala.collection.mutable.ArrayBuffer
 
+import org.apache.spark.sql.DataFrame
 import org.apache.spark.sql.functions.col
 import org.apache.spark.sql.rapids.tool.profiling.ApplicationInfo
 
-
 /**
  * CollectInformation mainly print information based on this event log:
  * Such as executors, parameters, etc.
@@ -46,16 +46,15 @@ class CollectInformation(apps: ArrayBuffer[ApplicationInfo], fileWriter: FileWri
   def printRapidsJAR(): Unit = {
     for (app <- apps) {
       if (app.gpuMode) {
-        fileWriter.write(s"Application ${app.appId} (index=${app.index}) 's" +
-          " Rapids Accelerator Jar and cuDF Jar:")
+        fileWriter.write("\nRapids Accelerator Jar and cuDF Jar:\n")
         // Look for rapids-4-spark and cuDF jar
         val rapidsJar = app.classpathEntries.filterKeys(_ matches ".*rapids-4-spark.*jar")
         val cuDFJar = app.classpathEntries.filterKeys(_ matches ".*cudf.*jar")
         if (rapidsJar.nonEmpty) {
-          rapidsJar.keys.foreach(k => fileWriter.write(k))
+          rapidsJar.keys.foreach(k => fileWriter.write(s"$k\n"))
         }
         if (cuDFJar.nonEmpty) {
-          cuDFJar.keys.foreach(k => fileWriter.write(k))
+          cuDFJar.keys.foreach(k => fileWriter.write(s"$k\n"))
         }
       }
     }
@@ -79,13 +78,13 @@ class CollectInformation(apps: ArrayBuffer[ApplicationInfo], fileWriter: FileWri
     }
   }
 
-  def generateDot(outputDirectory: String): Unit = {
+  def generateDot(outputDirectory: String, accumsOpt: Option[DataFrame]): Unit = {
     for (app <- apps) {
       val requiredDataFrames = Seq("sqlMetricsDF", "driverAccumDF",
         "taskStageAccumDF", "taskStageAccumDF")
         .map(name => s"${name}_${app.index}")
       if (requiredDataFrames.forall(app.allDataFrames.contains)) {
-        val accums = app.runQuery(app.generateSQLAccums)
+        val accums = accumsOpt.getOrElse(app.runQuery(app.generateSQLAccums))
         val start = System.nanoTime()
         val accumSummary = accums
           .select(col("sqlId"), col("accumulatorId"), col("max_value"))
@@ -113,4 +112,17 @@ class CollectInformation(apps: ArrayBuffer[ApplicationInfo], fileWriter: FileWri
       }
     }
   }
+
+  // Print SQL Plan Metrics
+  def printSQLPlanMetrics(shouldGenDot: Boolean, outputDir: String,
+      writeOutput: Boolean = true): Unit = {
+    for (app <- apps) {
+      val messageHeader = "\nSQL Plan Metrics for Application:\n"
+      val accums = app.runQuery(app.generateSQLAccums, fileWriter = Some(fileWriter),
+        messageHeader = messageHeader)
+      if (shouldGenDot) {
+        generateDot(outputDir, Some(accums))
+      }
+    }
+  }
 }
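Editor's note: with the new accumsOpt parameter, printSQLPlanMetrics runs generateSQLAccums once and hands the resulting DataFrame to generateDot, which falls back to re-running the query only when called standalone. The reuse pattern in miniature, as plain Scala with illustrative names only:

object ReuseSketch {
  def expensiveQuery(): Seq[Int] = { println("running query"); Seq(1, 2, 3) }

  // Consumer recomputes only when nothing was precomputed, like generateDot.
  def consume(precomputed: Option[Seq[Int]]): Seq[Int] =
    precomputed.getOrElse(expensiveQuery())

  def main(args: Array[String]): Unit = {
    val accums = expensiveQuery() // printSQLPlanMetrics: runs the query once
    consume(Some(accums))         // generateDot reuses it; no second run
    consume(None)                 // standalone call: recomputes as before
  }
}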
ProfileMain.scala
@@ -96,6 +96,9 @@ object ProfileMain extends Logging {
       val app = new ApplicationInfo(numOutputRows, sparkSession, path, index)
       apps += app
       logApplicationInfo(app)
+      // It is a bit odd that we process apps individually right now due to
+      // memory concerns, so the aggregation functions only aggregate a single
+      // application, not across applications.
       processApps(apps, appArgs.generateDot())
       app.dropAllTempViews()
       index += 1
@@ -114,25 +117,26 @@ object ProfileMain extends Logging {
    */
   def processApps(apps: ArrayBuffer[ApplicationInfo], generateDot: Boolean): Unit = {
     if (appArgs.compare()) { // Compare Applications
-      logInfo(s"### A. Compare Information Collected ###")
+      fileWriter.write("### A. Compare Information Collected ###")
       val compare = new CompareApplications(apps, fileWriter)
       compare.compareAppInfo()
       compare.compareExecutorInfo()
       compare.compareRapidsProperties()
     } else {
       val collect = new CollectInformation(apps, fileWriter)
-      logInfo(s"### A. Information Collected ###")
+      fileWriter.write("### A. Information Collected ###")
       collect.printAppInfo()
       collect.printExecutorInfo()
       collect.printRapidsProperties()
-      if (generateDot) {
-        collect.generateDot(appArgs.outputDirectory())
-      }
       collect.printRapidsJAR()
+      collect.printSQLPlanMetrics(generateDot, appArgs.outputDirectory())
     }
 
-    logInfo(s"### B. Analysis ###")
+    fileWriter.write("\n### B. Analysis ###\n")
+    val analysis = new Analysis(apps, Some(fileWriter))
     analysis.jobAndStageMetricsAggregation()
     analysis.sqlMetricsAggregation()
+    analysis.shuffleSkewCheck()
   }
 
   def logApplicationInfo(app: ApplicationInfo) = {
@@ -0,0 +1,6 @@
appIndex,ID,numTasks,Duration,diskBytesSpilled_sum,duration_sum,duration_max,duration_min,duration_avg,executorCPUTime_sum,executorDeserializeCPUTime_sum,executorDeserializeTime_sum,executorRunTime_sum,gettingResultTime_sum,input_bytesRead_sum,input_recordsRead_sum,jvmGCTime_sum,memoryBytesSpilled_sum,output_bytesWritten_sum,output_recordsWritten_sum,peakExecutionMemory_max,resultSerializationTime_sum,resultSize_max,sr_fetchWaitTime_sum,sr_localBlocksFetched_sum,sr_localBytesRead_sum,sr_remoteBlocksFetched_sum,sr_remoteBytesRead_sum,sr_remoteBytesReadToDisk_sum,sr_totalBytesRead_sum,sw_bytesWritten_sum,sw_recordsWritten_sum,sw_writeTime_sum
1,job_0,213,2515,0,25761,1624,9,120.9,7045,3021,11178,13522,0,0,0,424,0,0,0,0,10,8075,0,2600,80279920,0,0,0,80279920,80279920,2600,849
1,stage_0,6,1761,0,9455,1624,1540,1575.8,2914,1283,5056,4248,0,0,0,228,0,0,0,0,3,2951,0,0,0,0,0,0,0,40132263,1200,373
1,stage_1,6,1666,0,9274,1621,1528,1545.7,2568,1004,5016,4099,0,0,0,196,0,0,0,0,4,2951,0,0,0,0,0,0,0,40132257,1200,473
1,stage_2,200,592,0,6937,221,9,34.7,1518,696,1065,5125,0,0,0,0,0,0,0,0,3,7402,0,2400,80264520,0,0,0,80264520,15400,200,3
1,stage_3,1,101,0,95,95,95,95.0,45,38,41,50,0,0,0,0,0,0,0,0,0,8075,0,200,15400,0,0,0,15400,0,0,0
@@ -0,0 +1,6 @@
appIndex,ID,numTasks,Duration,diskBytesSpilled_sum,duration_sum,duration_max,duration_min,duration_avg,executorCPUTime_sum,executorDeserializeCPUTime_sum,executorDeserializeTime_sum,executorRunTime_sum,gettingResultTime_sum,input_bytesRead_sum,input_recordsRead_sum,jvmGCTime_sum,memoryBytesSpilled_sum,output_bytesWritten_sum,output_recordsWritten_sum,peakExecutionMemory_max,resultSerializationTime_sum,resultSize_max,sr_fetchWaitTime_sum,sr_localBlocksFetched_sum,sr_localBytesRead_sum,sr_remoteBlocksFetched_sum,sr_remoteBytesRead_sum,sr_remoteBytesReadToDisk_sum,sr_totalBytesRead_sum,sw_bytesWritten_sum,sw_recordsWritten_sum,sw_writeTime_sum
1,job_0,213,2569,0,26735,1598,10,125.5,6500,3433,12095,13414,0,0,0,336,0,0,0,0,8,8075,0,2600,80279908,0,0,0,80279908,80279908,2600,944
1,stage_0,6,1743,0,9518,1598,1580,1586.3,2509,1391,5309,4043,0,0,0,168,0,0,0,0,3,2951,0,0,0,0,0,0,0,40132250,1200,397
1,stage_1,6,1631,0,9434,1582,1568,1572.3,2405,1065,5273,3998,0,0,0,168,0,0,0,0,5,2951,0,0,0,0,0,0,0,40132258,1200,505
1,stage_2,200,688,0,7705,237,10,38.5,1556,940,1474,5337,0,0,0,0,0,0,0,0,0,7359,0,2400,80264508,0,0,0,80264508,15400,200,42
1,stage_3,1,83,0,78,78,78,78.0,30,37,39,36,0,0,0,0,0,0,0,0,0,8075,0,200,15400,0,0,0,15400,0,0,0
@@ -0,0 +1,11 @@
appIndex,ID,numTasks,Duration,diskBytesSpilled_sum,duration_sum,duration_max,duration_min,duration_avg,executorCPUTime_sum,executorDeserializeCPUTime_sum,executorDeserializeTime_sum,executorRunTime_sum,gettingResultTime_sum,input_bytesRead_sum,input_recordsRead_sum,jvmGCTime_sum,memoryBytesSpilled_sum,output_bytesWritten_sum,output_recordsWritten_sum,peakExecutionMemory_max,resultSerializationTime_sum,resultSize_max,sr_fetchWaitTime_sum,sr_localBlocksFetched_sum,sr_localBytesRead_sum,sr_remoteBlocksFetched_sum,sr_remoteBytesRead_sum,sr_remoteBytesReadToDisk_sum,sr_totalBytesRead_sum,sw_bytesWritten_sum,sw_recordsWritten_sum,sw_writeTime_sum
1,job_0,213,2569,0,26735,1598,10,125.5,6500,3433,12095,13414,0,0,0,336,0,0,0,0,8,8075,0,2600,80279908,0,0,0,80279908,80279908,2600,944
1,stage_0,6,1743,0,9518,1598,1580,1586.3,2509,1391,5309,4043,0,0,0,168,0,0,0,0,3,2951,0,0,0,0,0,0,0,40132250,1200,397
1,stage_1,6,1631,0,9434,1582,1568,1572.3,2405,1065,5273,3998,0,0,0,168,0,0,0,0,5,2951,0,0,0,0,0,0,0,40132258,1200,505
1,stage_2,200,688,0,7705,237,10,38.5,1556,940,1474,5337,0,0,0,0,0,0,0,0,0,7359,0,2400,80264508,0,0,0,80264508,15400,200,42
1,stage_3,1,83,0,78,78,78,78.0,30,37,39,36,0,0,0,0,0,0,0,0,0,8075,0,200,15400,0,0,0,15400,0,0,0
2,job_0,213,2515,0,25761,1624,9,120.9,7045,3021,11178,13522,0,0,0,424,0,0,0,0,10,8075,0,2600,80279920,0,0,0,80279920,80279920,2600,849
2,stage_0,6,1761,0,9455,1624,1540,1575.8,2914,1283,5056,4248,0,0,0,228,0,0,0,0,3,2951,0,0,0,0,0,0,0,40132263,1200,373
2,stage_1,6,1666,0,9274,1621,1528,1545.7,2568,1004,5016,4099,0,0,0,196,0,0,0,0,4,2951,0,0,0,0,0,0,0,40132257,1200,473
2,stage_2,200,592,0,6937,221,9,34.7,1518,696,1065,5125,0,0,0,0,0,0,0,0,3,7402,0,2400,80264520,0,0,0,80264520,15400,200,3
2,stage_3,1,101,0,95,95,95,95.0,45,38,41,50,0,0,0,0,0,0,0,0,0,8075,0,200,15400,0,0,0,15400,0,0,0
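Editor's note: expectation files like these back the new test suites: a test runs an aggregation (e.g. jobAndStageMetricsAggregation, which now returns a DataFrame), loads the CSV, and asserts the two frames match. A hedged sketch of that comparison follows; the helper name and reader options are illustrative, not the repo's actual test utilities:

import org.apache.spark.sql.DataFrame

// Illustrative check: load the expectation CSV with its header row, then
// require the symmetric difference of the two DataFrames to be empty.
def assertMatchesExpectation(actual: DataFrame, expectationCsv: String): Unit = {
  val expected = actual.sparkSession.read
    .option("header", "true")
    .option("inferSchema", "true")
    .csv(expectationCsv)
  assert(actual.except(expected).isEmpty && expected.except(actual).isEmpty)
}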
@@ -0,0 +1,84 @@
sqlID,nodeID,nodeName,accumulatorId,name,max_value,metricType
0,0,GpuColumnarToRow,33,total time,857404,nsTiming
0,1,GpuHashAggregate,34,output rows,1,sum
0,1,GpuHashAggregate,35,output columnar batches,1,sum
0,1,GpuHashAggregate,36,total time,4212819,nsTiming
0,1,GpuHashAggregate,37,aggregation time,3846803,nsTiming
0,2,GpuShuffleCoalesce,39,output rows,200,sum
0,2,GpuShuffleCoalesce,40,output columnar batches,1,sum
0,2,GpuShuffleCoalesce,41,total time,3803240,nsTiming
0,2,GpuShuffleCoalesce,42,collect batch time,3277904,nsTiming
0,2,GpuShuffleCoalesce,43,concat batch time,392509,nsTiming
0,3,GpuColumnarExchange,44,partition data size,16000,sum
0,3,GpuColumnarExchange,45,partitions,1,sum
0,3,GpuColumnarExchange,46,output rows,200,sum
0,3,GpuColumnarExchange,47,output columnar batches,200,sum
0,3,GpuColumnarExchange,48,data size,19600,size
0,3,GpuColumnarExchange,50,local blocks read,200,sum
0,3,GpuColumnarExchange,53,local bytes read,15400,size
0,3,GpuColumnarExchange,54,fetch wait time,0,timing
0,3,GpuColumnarExchange,55,records read,200,sum
0,3,GpuColumnarExchange,56,shuffle bytes written,15400,size
0,3,GpuColumnarExchange,57,shuffle records written,200,sum
0,3,GpuColumnarExchange,58,shuffle write time,93193331,nsTiming
0,4,GpuHashAggregate,59,output rows,200,sum
0,4,GpuHashAggregate,60,output columnar batches,200,sum
0,4,GpuHashAggregate,61,total time,80781515,nsTiming
0,4,GpuHashAggregate,62,aggregation time,31923387,nsTiming
0,5,GpuProject,64,total time,5377158,nsTiming
0,6,GpuShuffledHashJoin,65,output rows,10000000,sum
0,6,GpuShuffledHashJoin,66,output columnar batches,200,sum
0,6,GpuShuffledHashJoin,67,total time,3904332009,nsTiming
0,6,GpuShuffledHashJoin,68,build side size,80000000,size
0,6,GpuShuffledHashJoin,69,build time,3448606506,nsTiming
0,6,GpuShuffledHashJoin,70,stream time,260796041,nsTiming
0,6,GpuShuffledHashJoin,71,join time,178084313,nsTiming
0,6,GpuShuffledHashJoin,72,join output rows,10000000,sum
0,7,GpuShuffleCoalesce,74,output rows,10000000,sum
0,7,GpuShuffleCoalesce,75,output columnar batches,200,sum
0,7,GpuShuffleCoalesce,76,total time,261389422,nsTiming
0,7,GpuShuffleCoalesce,77,collect batch time,167775821,nsTiming
0,7,GpuShuffleCoalesce,78,concat batch time,83550919,nsTiming
0,8,GpuColumnarExchange,79,partition data size,42872100,sum
0,8,GpuColumnarExchange,80,partitions,200,sum
0,8,GpuColumnarExchange,81,output rows,10000000,sum
0,8,GpuColumnarExchange,82,output columnar batches,1200,sum
0,8,GpuColumnarExchange,83,data size,40076192,size
0,8,GpuColumnarExchange,85,local blocks read,1200,sum
0,8,GpuColumnarExchange,88,local bytes read,40132258,size
0,8,GpuColumnarExchange,89,fetch wait time,0,timing
0,8,GpuColumnarExchange,90,records read,1200,sum
0,8,GpuColumnarExchange,91,shuffle bytes written,40132258,size
0,8,GpuColumnarExchange,92,shuffle records written,1200,sum
0,8,GpuColumnarExchange,93,shuffle write time,508750471,nsTiming
0,9,GpuProject,94,total time,6667140,nsTiming
0,10,GpuRowToColumnar,95,total time,61112304,nsTiming
0,11,WholeStageCodegen (1),96,duration,5463,timing
0,13,Scan,97,number of output rows,10000000,sum
0,14,GpuCoalesceBatches,98,output rows,10000000,sum
0,14,GpuCoalesceBatches,99,output columnar batches,200,sum
0,14,GpuCoalesceBatches,100,total time,3383354389,nsTiming
0,14,GpuCoalesceBatches,101,collect batch time,3275108263,nsTiming
0,14,GpuCoalesceBatches,102,concat batch time,20312708,nsTiming
0,14,GpuCoalesceBatches,103,peak device memory,80000000,size
0,15,GpuShuffleCoalesce,107,output rows,10000000,sum
0,15,GpuShuffleCoalesce,108,output columnar batches,200,sum
0,15,GpuShuffleCoalesce,109,total time,3266208420,nsTiming
0,15,GpuShuffleCoalesce,110,collect batch time,359397047,nsTiming
0,15,GpuShuffleCoalesce,111,concat batch time,104974316,nsTiming
0,16,GpuColumnarExchange,112,partition data size,42872100,sum
0,16,GpuColumnarExchange,113,partitions,200,sum
0,16,GpuColumnarExchange,114,output rows,10000000,sum
0,16,GpuColumnarExchange,115,output columnar batches,1200,sum
0,16,GpuColumnarExchange,116,data size,40076192,size
0,16,GpuColumnarExchange,118,local blocks read,1200,sum
0,16,GpuColumnarExchange,121,local bytes read,40132250,size
0,16,GpuColumnarExchange,122,fetch wait time,0,timing
0,16,GpuColumnarExchange,123,records read,1200,sum
0,16,GpuColumnarExchange,124,shuffle bytes written,40132250,size
0,16,GpuColumnarExchange,125,shuffle records written,1200,sum
0,16,GpuColumnarExchange,126,shuffle write time,400284505,nsTiming
0,17,GpuProject,127,total time,207820,nsTiming
0,18,GpuRowToColumnar,128,total time,58640462,nsTiming
0,19,WholeStageCodegen (2),129,duration,5920,timing
0,21,Scan,130,number of output rows,10000000,sum