diff --git a/common/utils/src/main/resources/error/error-conditions.json b/common/utils/src/main/resources/error/error-conditions.json
index 29eda228c2daa..0ebeea9aed8d2 100644
--- a/common/utils/src/main/resources/error/error-conditions.json
+++ b/common/utils/src/main/resources/error/error-conditions.json
@@ -6656,11 +6656,6 @@
       "Type <dataType> does not support ordered operations."
     ]
   },
-  "_LEGACY_ERROR_TEMP_2011" : {
-    "message" : [
-      "Unexpected data type <dataType>."
-    ]
-  },
   "_LEGACY_ERROR_TEMP_2013" : {
     "message" : [
       "Negative values found in <frequencyExpression>"
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/CSVInferSchema.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/CSVInferSchema.scala
index 2c27da3cf6e15..5444ab6845867 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/CSVInferSchema.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/CSVInferSchema.scala
@@ -21,12 +21,12 @@ import java.util.Locale
 
 import scala.util.control.Exception.allCatch
 
+import org.apache.spark.SparkException
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.analysis.TypeCoercion
 import org.apache.spark.sql.catalyst.expressions.ExprUtils
 import org.apache.spark.sql.catalyst.util.{DateFormatter, TimestampFormatter}
 import org.apache.spark.sql.catalyst.util.LegacyDateFormats.FAST_DATE_FORMAT
-import org.apache.spark.sql.errors.QueryExecutionErrors
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types._
 
@@ -138,7 +138,7 @@ class CSVInferSchema(val options: CSVOptions) extends Serializable {
       case BooleanType => tryParseBoolean(field)
       case StringType => StringType
       case other: DataType =>
-        throw QueryExecutionErrors.dataTypeUnexpectedError(other)
+        throw SparkException.internalError(s"Unexpected data type $other")
     }
     compatibleType(typeSoFar, typeElemInfer).getOrElse(StringType)
   }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproximatePercentile.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproximatePercentile.scala
index 4987e31b49911..8ad062ab0e2f9 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproximatePercentile.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproximatePercentile.scala
@@ -21,6 +21,7 @@ import java.nio.ByteBuffer
 
 import com.google.common.primitives.{Doubles, Ints, Longs}
 
+import org.apache.spark.SparkException
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.analysis.{FunctionRegistry, TypeCheckResult}
 import org.apache.spark.sql.catalyst.analysis.TypeCheckResult.{DataTypeMismatch, TypeCheckSuccess}
@@ -32,7 +33,6 @@ import org.apache.spark.sql.catalyst.types.PhysicalNumericType
 import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData}
 import org.apache.spark.sql.catalyst.util.QuantileSummaries
 import org.apache.spark.sql.catalyst.util.QuantileSummaries.{defaultCompressThreshold, Stats}
-import org.apache.spark.sql.errors.QueryExecutionErrors
 import org.apache.spark.sql.types._
 import org.apache.spark.util.ArrayImplicits._
 
@@ -189,7 +189,7 @@ case class ApproximatePercentile(
           PhysicalNumericType.numeric(n)
             .toDouble(value.asInstanceOf[PhysicalNumericType#InternalType])
         case other: DataType =>
-          throw QueryExecutionErrors.dataTypeUnexpectedError(other)
+          throw SparkException.internalError(s"Unexpected data type $other")
       }
       buffer.add(doubleValue)
     }
@@ -214,7 +214,7 @@ case class ApproximatePercentile(
       case DoubleType => doubleResult
       case _: DecimalType => doubleResult.map(Decimal(_))
       case other: DataType =>
-        throw QueryExecutionErrors.dataTypeUnexpectedError(other)
+        throw SparkException.internalError(s"Unexpected data type $other")
     }
     if (result.length == 0) {
       null
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
index 0b37cf951a29b..2ab86a5c5f03f 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
@@ -384,18 +384,6 @@ private[sql] object QueryExecutionErrors extends QueryErrorsBase with ExecutionE
       s"The aggregate window function ${toSQLId(funcName)} does not support merging.")
   }
 
-  def dataTypeUnexpectedError(dataType: DataType): SparkUnsupportedOperationException = {
-    new SparkUnsupportedOperationException(
-      errorClass = "_LEGACY_ERROR_TEMP_2011",
-      messageParameters = Map("dataType" -> dataType.catalogString))
-  }
-
-  def typeUnsupportedError(dataType: DataType): SparkIllegalArgumentException = {
-    new SparkIllegalArgumentException(
-      errorClass = "_LEGACY_ERROR_TEMP_2011",
-      messageParameters = Map("dataType" -> dataType.toString()))
-  }
-
   def negativeValueUnexpectedError(
       frequencyExpression : Expression): SparkIllegalArgumentException = {
     new SparkIllegalArgumentException(
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
index 676a2ab64d0a3..ffdca65151052 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
@@ -29,7 +29,7 @@ import scala.util.control.NonFatal
 
 import org.apache.hadoop.fs.Path
 
-import org.apache.spark.SparkRuntimeException
+import org.apache.spark.{SparkException, SparkRuntimeException}
 import org.apache.spark.sql.catalyst.{InternalRow, SQLConfHelper}
 import org.apache.spark.sql.catalyst.analysis.TypeCoercion
 import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
@@ -550,7 +550,7 @@ object PartitioningUtils extends SQLConfHelper {
       Cast(Literal(unescapePathName(value)), it).eval()
     case BinaryType => value.getBytes()
     case BooleanType => value.toBoolean
-    case dt => throw QueryExecutionErrors.typeUnsupportedError(dt)
+    case dt => throw SparkException.internalError(s"Unsupported partition type: $dt")
   }
 
   def validatePartitionColumn(
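For reviewers, a minimal, self-contained sketch (not part of the patch) of the pattern this diff converges on, assuming `spark-core` and the Spark SQL types are on the classpath; the object and method names (`InternalErrorSketch`, `widen`) are made up for illustration:

```scala
import org.apache.spark.SparkException
import org.apache.spark.sql.types._

object InternalErrorSketch {
  // Same shape as the rewritten call sites: a match on DataType whose
  // catch-all raises an internal error, because reaching that branch
  // indicates a bug in Spark itself rather than bad user input.
  def widen(dt: DataType, value: Any): Double = dt match {
    case DoubleType => value.asInstanceOf[Double]
    case FloatType => value.asInstanceOf[Float].toDouble
    case other: DataType =>
      // SparkException.internalError tags the message with the INTERNAL_ERROR
      // error class, replacing the deleted _LEGACY_ERROR_TEMP_2011 template.
      throw SparkException.internalError(s"Unexpected data type $other")
  }

  def main(args: Array[String]): Unit = {
    try {
      widen(StringType, "oops")
    } catch {
      case e: SparkException =>
        // The message starts with something like
        // "[INTERNAL_ERROR] Unexpected data type ..."
        println(e.getMessage)
    }
  }
}
```

Consolidating on `SparkException.internalError` also lets the two removed helpers (`dataTypeUnexpectedError`, `typeUnsupportedError`) and their shared `_LEGACY_ERROR_TEMP_2011` entry in error-conditions.json be dropped entirely.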