Add a Reader/Writer Interface for Streaming #25
Changes from 6 commits: 8ed211b, 0676db4, 3a87479, f4f7205, 99969b9, e06c0e3, a52200b
@@ -0,0 +1,124 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql

import org.apache.spark.sql.execution.streaming.StreamingRelation

import scala.collection.JavaConverters._

import org.apache.hadoop.util.StringUtils

import org.apache.spark.Logging
import org.apache.spark.annotation.Experimental
import org.apache.spark.sql.execution.datasources.{LogicalRelation, ResolvedDataSource}
import org.apache.spark.sql.types.StructType

/**
 * :: Experimental ::
 * An interface for reading streaming data. Use `sqlContext.streamFrom` to access these methods.
 */
@Experimental
class DataStreamReader private[sql](sqlContext: SQLContext) extends Logging {

  /**
   * Specifies the input data source format.
   *
   * @since 2.0.0
   */
  def format(source: String): DataStreamReader = {
    this.source = source
    this
  }

  /**
   * Specifies the input schema. Some data streams (e.g. JSON) can infer the input schema
   * automatically from data. By specifying the schema here, the underlying data stream can
   * skip the schema inference step, and thus speed up data reading.
   *
   * @since 2.0.0
   */
  def schema(schema: StructType): DataStreamReader = {
    this.userSpecifiedSchema = Option(schema)
    this
  }

  /**
   * Adds an input option for the underlying data stream.
   *
   * @since 2.0.0
   */
  def option(key: String, value: String): DataStreamReader = {
    this.extraOptions += (key -> value)
    this
  }

  /**
   * (Scala-specific) Adds input options for the underlying data stream.
   *
   * @since 2.0.0
   */
  def options(options: scala.collection.Map[String, String]): DataStreamReader = {
    this.extraOptions ++= options
    this
  }

  /**
   * Adds input options for the underlying data stream.
   *
   * @since 2.0.0
   */
  def options(options: java.util.Map[String, String]): DataStreamReader = {
    this.options(options.asScala)
    this
  }

  /**
   * Loads streaming input in as a [[DataFrame]], for data streams that don't require a path
   * (e.g. external key-value stores).
   *
   * @since 2.0.0
   */
  def open(): DataFrame = {
    val resolved = ResolvedDataSource.createSource(
      sqlContext,
      userSpecifiedSchema = userSpecifiedSchema,
      providerName = source,
      options = extraOptions.toMap)
    DataFrame(sqlContext, StreamingRelation(resolved))
  }

  /**
   * Loads input in as a [[DataFrame]], for data streams that read from some path.
   *
   * @since 2.0.0
   */
  def open(path: String): DataFrame = {
    option("path", path).open()
  }

  ///////////////////////////////////////////////////////////////////////////////////////
  // Builder pattern config options
  ///////////////////////////////////////////////////////////////////////////////////////

  private var source: String = sqlContext.conf.defaultDataSourceName

  private var userSpecifiedSchema: Option[StructType] = None

  private var extraOptions = new scala.collection.mutable.HashMap[String, String]

}
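For orientation, a minimal usage sketch of the reader side of this API. It assumes the `sqlContext.streamFrom` entry point mentioned in the class doc above returns this DataStreamReader; the format name and option key are placeholders, not options defined by this patch.

// Hedged sketch, not part of the diff: build a streaming DataFrame with the
// builder methods above. `sqlContext.streamFrom` comes from the class doc;
// "text" and "someSourceOption" are illustrative placeholders.
val streamingDF: DataFrame = sqlContext.streamFrom
  .format("text")                    // input data source format
  .option("someSourceOption", "1")   // forwarded to the source via extraOptions
  .open("/path/to/input")            // equivalent to option("path", ...).open()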
@@ -0,0 +1,144 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql

import java.util.Properties

import org.apache.spark.sql.execution.streaming.{Offset, Sink, Batch, StreamExecution}

import scala.collection.JavaConverters._

import org.apache.spark.annotation.Experimental
import org.apache.spark.sql.catalyst.{SqlParser, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis.{UnresolvedAttribute, UnresolvedRelation}
import org.apache.spark.sql.catalyst.plans.logical.{Project, InsertIntoTable}
import org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils
import org.apache.spark.sql.execution.datasources.{CreateTableUsingAsSelect, ResolvedDataSource}
import org.apache.spark.sql.sources.HadoopFsRelation
/**
 * :: Experimental ::
 * Interface used to start the execution of a streaming query.
 *
 * @since 2.0.0
 */
@Experimental
final class DataStreamWriter private[sql](df: DataFrame) {

  /**
   * Specifies the underlying output data source. Built-in options include "parquet", "json", etc.
   *
   * @since 2.0.0
   */
  def format(source: String): DataStreamWriter = {
    this.source = source
    this
  }

Review comment: Similar to my suggestion for …
  /**
   * Adds an output option for the underlying data source.
   *
   * @since 2.0.0
   */
  def option(key: String, value: String): DataStreamWriter = {
    this.extraOptions += (key -> value)
    this
  }

  /**
   * (Scala-specific) Adds output options for the underlying data source.
   *
   * @since 2.0.0
   */
  def options(options: scala.collection.Map[String, String]): DataStreamWriter = {
    this.extraOptions ++= options
    this
  }

  /**
   * Adds output options for the underlying data source.
   *
   * @since 2.0.0
   */
  def options(options: java.util.Map[String, String]): DataStreamWriter = {
    this.options(options.asScala)
    this
  }

  /**
   * Partitions the output by the given columns on the file system. If specified, the output is
   * laid out on the file system similar to Hive's partitioning scheme.
   *
   * @since 2.0.0
   */
  @scala.annotation.varargs
  def partitionBy(colNames: String*): DataStreamWriter = {
    this.partitioningColumns = Option(colNames)
    this
  }

  /**
   * Starts the execution of the streaming query, which will continually output results to the
   * given path as new data arrives. The returned [[StandingQuery]] object can be used to interact
   * with the stream.
   *
   * @since 2.0.0
   */
Review comment: since 2.0.0 missing.
Reply: added

  def start(path: String): StandingQuery = {
    this.extraOptions += ("path" -> path)
    start()
  }
  /**
   * Starts the execution of the streaming query, which will continually output results as new
   * data arrives. The returned [[StandingQuery]] object can be used to interact with the stream.
   *
   * @since 2.0.0
   */
  def start(): StandingQuery = {
    val sink = ResolvedDataSource.createSink(
      df.sqlContext,
      source,
      extraOptions.toMap)

    new StreamExecution(df.sqlContext, df.logicalPlan, sink)
  }

  private def normalizedParCols: Option[Seq[String]] = partitioningColumns.map { parCols =>
    parCols.map { col =>
      df.logicalPlan.output
        .map(_.name)
        .find(df.sqlContext.analyzer.resolver(_, col))
        .getOrElse(throw new AnalysisException(s"Partition column $col not found in existing " +
          s"columns (${df.logicalPlan.output.map(_.name).mkString(", ")})"))
    }
  }
  ///////////////////////////////////////////////////////////////////////////////////////
  // Builder pattern config options
  ///////////////////////////////////////////////////////////////////////////////////////

  private var source: String = df.sqlContext.conf.defaultDataSourceName

Review comment: I don't expect anyone to use this from multiple threads, but nonetheless wouldn't it be better to make these volatile?

  private var mode: SaveMode = SaveMode.ErrorIfExists

Review comment: this mode does not seem to be used anywhere.

  private var extraOptions = new scala.collection.mutable.HashMap[String, String]

  private var partitioningColumns: Option[Seq[String]] = None

}
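To show how the writer side composes, a hedged sketch follows. The DataFrame method that returns a DataStreamWriter is not part of this diff, so `streamTo` below is purely a placeholder name, as are the format, option key, partition column, and paths.

// Hedged sketch, not part of the diff: configure and start a streaming query.
// `streamingDF.streamTo` stands in for whatever entry point hands back a
// DataStreamWriter for the DataFrame; all names and paths are illustrative.
val query: StandingQuery = streamingDF.streamTo
  .format("parquet")                // output data source
  .partitionBy("date")              // Hive-style partition layout on the file system
  .option("someSinkOption", "true") // forwarded to the sink via extraOptions
  .start("/path/to/output")         // adds ("path" -> ...) and starts the query

// Later, stop the continuous execution; stop() blocks until the execution
// thread has terminated (see the StandingQuery trait below).
query.stop()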
@@ -0,0 +1,40 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql

import org.apache.spark.annotation.DeveloperApi
trait StandingQuery {

  /**
   * Stops the execution of the streaming query if it is running. This method blocks until the
   * thread performing execution has stopped.
   */
  def stop(): Unit

  /** Clears the indicator that a batch has completed. Used for testing. */
  @DeveloperApi
  def clearBatchMarker(): Unit
Review comment: nit: it's slightly non-intuitive with …
Reply: That's a good idea, though I think we need a Java version too. I'd probably defer this, but we should keep this suggestion in mind when we finalize the testing API.
Reply: For future note, we can extend it to … Then we can internally guarantee that the …
Reply: Maybe a listener is better?
  /**
   * Awaits the completion of at least one streaming batch. Must be called after
   * `clearBatchMarker` to guarantee that a new batch has been processed.
   */
  @DeveloperApi
  def awaitBatchCompletion(): Unit
}
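The two @DeveloperApi methods are meant to be used as a pair in tests: clear the marker first, then wait, so the completed batch is guaranteed to be a new one. A hedged sketch of that handshake follows, reusing the `query` from the writer sketch above; `addTestData` is a hypothetical helper for feeding the source.

// Hedged sketch of the testing pattern described in the doc comments above.
query.clearBatchMarker()      // reset the "a batch has completed" indicator
addTestData("event-1")        // hypothetical helper that pushes data into the streaming source
query.awaitBatchCompletion()  // returns once at least one new batch has been processed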
Review comment: Writing `reader.format("kafka")` is quite weird, and will be weird for most non-fs streaming sources. Rather, I propose having an alias called `source`, which works nicely for both batch and streaming - `source("text")`, `source("parquet")`, `source("kafka")` all make sense.
Reply: /cc @rxin
Reply: I think this depends on what other methods are available on the reader/writer interfaces.
Reply: ah never mind -- i misunderstood it. your proposal makes sense.
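To make the suggestion concrete, the proposed alias could be a one-line delegate on each builder; this is only a sketch of the reviewer's idea, not something in the diff.

// Sketch of the proposed alias: `reader.source("kafka")` reads more naturally
// than `reader.format("kafka")` for non-file sources. Purely illustrative.
def source(sourceName: String): DataStreamReader = format(sourceName)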