[SPARK-33549][SQL] Remove configuration spark.sql.legacy.allowCastNumericToTimestamp

### What changes were proposed in this pull request?

Remove SQL configuration spark.sql.legacy.allowCastNumericToTimestamp

### Why are the changes needed?

In the current master branch, there is a new configuration, `spark.sql.legacy.allowCastNumericToTimestamp`, which controls whether casting numeric types to the timestamp type is allowed. The default value is true.
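For context, a minimal sketch of the cast this flag governs, assuming a spark-shell session with a `SparkSession` available as `spark` (the rendered value depends on the session time zone):

```scala
// Default (non-ANSI) behavior: a numeric value cast to TIMESTAMP is
// interpreted as seconds since the Unix epoch.
spark.sql("SELECT CAST(1 AS TIMESTAMP)").show(truncate = false)
// e.g. 1970-01-01 00:00:01 when the session time zone is UTC
```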

After #30260, the type conversion between the timestamp type and numeric types is disallowed in ANSI mode. So we don't need a separate configuration `spark.sql.legacy.allowCastNumericToTimestamp` to disallow the conversion. Users just need to set `spark.sql.ansi.enabled` to get that behavior.
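As a hedged illustration of the ANSI-based behavior (again assuming a spark-shell session named `spark`; the conversion functions are the ones mentioned in the error message removed below):

```scala
// With ANSI mode on, the numeric-to-timestamp cast is rejected at analysis
// time; the explicit conversion functions should be used instead.
spark.conf.set("spark.sql.ansi.enabled", "true")
// spark.sql("SELECT CAST(1 AS TIMESTAMP)")  // rejected: cannot cast int to timestamp
spark.sql(
  "SELECT TIMESTAMP_SECONDS(1), TIMESTAMP_MILLIS(1000), TIMESTAMP_MICROS(1000000)"
).show(truncate = false)
// all three columns render as 1970-01-01 00:00:01 in UTC
```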

As the configuration has not been included in any release yet, we should remove it to make things simpler.

### Does this PR introduce _any_ user-facing change?

No, since the configuration is not released yet.

### How was this patch tested?

Existing test cases

Closes #30493 from gengliangwang/LEGACY_ALLOW_CAST_NUMERIC_TO_TIMESTAMP.

Authored-by: Gengliang Wang <gengliang.wang@databricks.com>
Signed-off-by: Wenchen Fan <wenchen@databricks.com>
gengliangwang authored and cloud-fan committed Nov 25, 2020
1 parent 781e19c commit 19f3b89
Showing 4 changed files with 2 additions and 43 deletions.
Cast.scala
@@ -59,8 +59,7 @@ object Cast {
     case (StringType, TimestampType) => true
     case (BooleanType, TimestampType) => true
     case (DateType, TimestampType) => true
-    case (_: NumericType, TimestampType) =>
-      SQLConf.get.getConf(SQLConf.LEGACY_ALLOW_CAST_NUMERIC_TO_TIMESTAMP)
+    case (_: NumericType, TimestampType) => true

     case (StringType, DateType) => true
     case (TimestampType, DateType) => true
@@ -273,15 +272,7 @@ abstract class CastBase extends UnaryExpression with TimeZoneAwareExpression wit
       TypeCheckResult.TypeCheckSuccess
     } else {
       TypeCheckResult.TypeCheckFailure(
-        if (child.dataType.isInstanceOf[NumericType] && dataType.isInstanceOf[TimestampType]) {
-          s"cannot cast ${child.dataType.catalogString} to ${dataType.catalogString}," +
-            "you can enable the casting by setting " +
-            s"${SQLConf.LEGACY_ALLOW_CAST_NUMERIC_TO_TIMESTAMP.key} to true," +
-            "but we strongly recommend using function " +
-            "TIMESTAMP_SECONDS/TIMESTAMP_MILLIS/TIMESTAMP_MICROS instead."
-        } else {
-          s"cannot cast ${child.dataType.catalogString} to ${dataType.catalogString}"
-        })
+        s"cannot cast ${child.dataType.catalogString} to ${dataType.catalogString}")
     }
   }

SQLConf.scala
@@ -2824,15 +2824,6 @@ object SQLConf {
     .checkValue(_ > 0, "The timeout value must be positive")
     .createWithDefault(10L)

-  val LEGACY_ALLOW_CAST_NUMERIC_TO_TIMESTAMP =
-    buildConf("spark.sql.legacy.allowCastNumericToTimestamp")
-      .internal()
-      .doc("When true, allow casting numeric to timestamp," +
-        "when false, forbid the cast, more details in SPARK-31710")
-      .version("3.1.0")
-      .booleanConf
-      .createWithDefault(true)
-
   val COALESCE_BUCKETS_IN_JOIN_ENABLED =
     buildConf("spark.sql.bucketing.coalesceBucketsInJoin.enabled")
       .doc("When true, if two bucketed tables with the different number of buckets are joined, " +
@@ -3550,9 +3541,6 @@ class SQLConf extends Serializable with Logging {

   def integerGroupingIdEnabled: Boolean = getConf(SQLConf.LEGACY_INTEGER_GROUPING_ID)

-  def legacyAllowCastNumericToTimestamp: Boolean =
-    getConf(SQLConf.LEGACY_ALLOW_CAST_NUMERIC_TO_TIMESTAMP)
-
   def metadataCacheTTL: Long = getConf(StaticSQLConf.METADATA_CACHE_TTL_SECONDS)

   def coalesceBucketsInJoinEnabled: Boolean = getConf(SQLConf.COALESCE_BUCKETS_IN_JOIN_ENABLED)

CastSuite.scala
@@ -1311,20 +1311,6 @@ class CastSuite extends CastSuiteBase {
     }
   }

-  test("SPARK-31710: fail casting from numeric to timestamp if it is forbidden") {
-    Seq(true, false).foreach { enable =>
-      withSQLConf(SQLConf.LEGACY_ALLOW_CAST_NUMERIC_TO_TIMESTAMP.key -> enable.toString) {
-        assert(cast(2.toByte, TimestampType).resolved == enable)
-        assert(cast(10.toShort, TimestampType).resolved == enable)
-        assert(cast(3, TimestampType).resolved == enable)
-        assert(cast(10L, TimestampType).resolved == enable)
-        assert(cast(Decimal(1.2), TimestampType).resolved == enable)
-        assert(cast(1.7f, TimestampType).resolved == enable)
-        assert(cast(2.3d, TimestampType).resolved == enable)
-      }
-    }
-  }
-
   test("SPARK-32828: cast from a derived user-defined type to a base type") {
     val v = Literal.create(Row(1), new ExampleSubTypeUDT())
     checkEvaluation(cast(v, new ExampleBaseTypeUDT), Row(1))

HiveCompatibilitySuite.scala
@@ -40,8 +40,6 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
   private val originalInMemoryPartitionPruning = TestHive.conf.inMemoryPartitionPruning
   private val originalCrossJoinEnabled = TestHive.conf.crossJoinEnabled
   private val originalSessionLocalTimeZone = TestHive.conf.sessionLocalTimeZone
-  private val originalLegacyAllowCastNumericToTimestamp =
-    TestHive.conf.legacyAllowCastNumericToTimestamp

   def testCases: Seq[(String, File)] = {
     hiveQueryDir.listFiles.map(f => f.getName.stripSuffix(".q") -> f)
@@ -61,8 +59,6 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
     // Fix session local timezone to America/Los_Angeles for those timezone sensitive tests
     // (timestamp_*)
     TestHive.setConf(SQLConf.SESSION_LOCAL_TIMEZONE, "America/Los_Angeles")
-    // Ensures that cast numeric to timestamp enabled so that we can test them
-    TestHive.setConf(SQLConf.LEGACY_ALLOW_CAST_NUMERIC_TO_TIMESTAMP, true)
     RuleExecutor.resetMetrics()
   }

@@ -73,8 +69,6 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
     TestHive.setConf(SQLConf.IN_MEMORY_PARTITION_PRUNING, originalInMemoryPartitionPruning)
     TestHive.setConf(SQLConf.CROSS_JOINS_ENABLED, originalCrossJoinEnabled)
     TestHive.setConf(SQLConf.SESSION_LOCAL_TIMEZONE, originalSessionLocalTimeZone)
-    TestHive.setConf(SQLConf.LEGACY_ALLOW_CAST_NUMERIC_TO_TIMESTAMP,
-      originalLegacyAllowCastNumericToTimestamp)

     // For debugging dump some statistics about how much time was spent in various optimizer rules
     logWarning(RuleExecutor.dumpTimeSpent())
