From c47d9b1bcf61f65a7078d43361b438fd56d0af81 Mon Sep 17 00:00:00 2001
From: panbingkun
Date: Wed, 2 Aug 2023 10:51:16 +0500
Subject: [PATCH] [SPARK-44555][SQL] Use checkError() to check Exception in
 command Suite & assign some error class names

### What changes were proposed in this pull request?
This PR aims to:
1. Use `checkError()` to check exceptions in the `command` test suites.
2. Assign some error class names, including `UNSUPPORTED_FEATURE.PURGE_PARTITION` and `UNSUPPORTED_FEATURE.PURGE_TABLE`.

### Why are the changes needed?
The changes improve the error framework.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
- Manually tested.
- Passed GA.

Closes #42169 from panbingkun/checkError_for_command.

Authored-by: panbingkun
Signed-off-by: Max Gekk
(cherry picked from commit 4ec27c3801aaa0cbba3e086c278a0ff96260b84a)
Signed-off-by: Max Gekk
---
 .../main/resources/error/error-classes.json   | 10 +++++++
 ...ditions-unsupported-feature-error-class.md |  8 +++++
 .../SupportsAtomicPartitionManagement.java    |  3 +-
 .../catalog/SupportsPartitionManagement.java  |  3 +-
 .../sql/connector/catalog/TableCatalog.java   |  3 +-
 .../sql/errors/QueryExecutionErrors.scala     | 12 ++++++++
 ...pportsAtomicPartitionManagementSuite.scala | 13 ++++----
 .../SupportsPartitionManagementSuite.scala    | 13 ++++----
 .../v1/AlterTableAddPartitionSuite.scala      | 14 +++++----
 .../v1/AlterTableDropPartitionSuite.scala     | 12 ++++----
 .../command/v1/AlterTableRenameSuite.scala    | 11 ++++---
 .../v1/AlterTableSetLocationSuite.scala       | 11 ++++---
 .../command/v1/ShowCreateTableSuite.scala     | 12 ++++----
 .../command/v1/ShowTablesSuite.scala          | 22 +++++++++-----
 .../command/v1/TruncateTableSuite.scala       | 11 ++++---
 .../v2/AlterTableDropPartitionSuite.scala     | 12 +++++---
 .../v2/AlterTableRecoverPartitionsSuite.scala | 11 ++++---
 .../v2/AlterTableSetLocationSuite.scala       | 12 ++++----
 .../execution/command/v2/DropTableSuite.scala | 12 +++++---
 .../command/v2/MsckRepairTableSuite.scala     | 11 ++++---
 .../command/v2/ShowTablesSuite.scala          | 11 ++++---
 .../command/ShowCreateTableSuite.scala        | 30 +++++++++++--------
 22 files changed, 172 insertions(+), 85 deletions(-)

diff --git a/common/utils/src/main/resources/error/error-classes.json b/common/utils/src/main/resources/error/error-classes.json
index 385435c740ea4..480ec63628394 100644
--- a/common/utils/src/main/resources/error/error-classes.json
+++ b/common/utils/src/main/resources/error/error-classes.json
@@ -2956,6 +2956,16 @@
           "Pivoting by the value '<value>' of the column data type <type>."
         ]
       },
+      "PURGE_PARTITION" : {
+        "message" : [
+          "Partition purge."
+        ]
+      },
+      "PURGE_TABLE" : {
+        "message" : [
+          "Purge table."
+        ]
+      },
       "PYTHON_UDF_IN_ON_CLAUSE" : {
         "message" : [
           "Python UDF in the ON clause of a <joinType> JOIN. In case of an INNNER JOIN consider rewriting to a CROSS JOIN with a WHERE clause."
diff --git a/docs/sql-error-conditions-unsupported-feature-error-class.md b/docs/sql-error-conditions-unsupported-feature-error-class.md
index aa1c622c458ba..7a60dc76fa640 100644
--- a/docs/sql-error-conditions-unsupported-feature-error-class.md
+++ b/docs/sql-error-conditions-unsupported-feature-error-class.md
@@ -141,6 +141,14 @@ PIVOT clause following a GROUP BY clause. Consider pushing the GROUP BY into a s
 
 Pivoting by the value '`<value>`' of the column data type `<type>`.
 
+## PURGE_PARTITION
+
+Partition purge.
+
+## PURGE_TABLE
+
+Purge table.
+
 ## PYTHON_UDF_IN_ON_CLAUSE
 
 Python UDF in the ON clause of a `<joinType>` JOIN. In case of an INNNER JOIN consider rewriting to a CROSS JOIN with a WHERE clause.
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagement.java b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagement.java
index 3eb9bf9f91349..48c6392d2b8ff 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagement.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagement.java
@@ -23,6 +23,7 @@
 import org.apache.spark.sql.catalyst.InternalRow;
 import org.apache.spark.sql.catalyst.analysis.NoSuchPartitionException;
 import org.apache.spark.sql.catalyst.analysis.PartitionsAlreadyExistException;
+import org.apache.spark.sql.errors.QueryExecutionErrors;
 
 /**
  * An atomic partition interface of {@link Table} to operate multiple partitions atomically.
@@ -107,7 +108,7 @@ void createPartitions(
    */
   default boolean purgePartitions(InternalRow[] idents)
       throws NoSuchPartitionException, UnsupportedOperationException {
-    throw new UnsupportedOperationException("Partition purge is not supported");
+    throw QueryExecutionErrors.unsupportedPurgePartitionError();
   }
 
   /**
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsPartitionManagement.java b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsPartitionManagement.java
index 4830e193222fc..e7a2af29a00bc 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsPartitionManagement.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsPartitionManagement.java
@@ -23,6 +23,7 @@
 import org.apache.spark.sql.catalyst.InternalRow;
 import org.apache.spark.sql.catalyst.analysis.NoSuchPartitionException;
 import org.apache.spark.sql.catalyst.analysis.PartitionsAlreadyExistException;
+import org.apache.spark.sql.errors.QueryExecutionErrors;
 import org.apache.spark.sql.types.StructType;
 
 /**
@@ -88,7 +89,7 @@ void createPartition(
    */
   default boolean purgePartition(InternalRow ident)
       throws NoSuchPartitionException, UnsupportedOperationException {
-    throw new UnsupportedOperationException("Partition purge is not supported");
+    throw QueryExecutionErrors.unsupportedPurgePartitionError();
   }
 
   /**
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/TableCatalog.java b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/TableCatalog.java
index 6cfd5ab1b6bee..d99e7e14b0117 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/TableCatalog.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/TableCatalog.java
@@ -23,6 +23,7 @@
 import org.apache.spark.sql.catalyst.analysis.NoSuchTableException;
 import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException;
 import org.apache.spark.sql.errors.QueryCompilationErrors;
+import org.apache.spark.sql.errors.QueryExecutionErrors;
 import org.apache.spark.sql.types.StructType;
 
 import java.util.Collections;
@@ -256,7 +257,7 @@ Table alterTable(
    * @since 3.1.0
    */
   default boolean purgeTable(Identifier ident) throws UnsupportedOperationException {
-    throw new UnsupportedOperationException("Purge table is not supported.");
+    throw QueryExecutionErrors.unsupportedPurgeTableError();
   }
 
   /**
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
index c7245eb0a6d60..89c080409e2ce 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
@@ -2702,4 +2702,16 @@ private[sql] object QueryExecutionErrors extends QueryErrorsBase with ExecutionE
       errorClass = "MERGE_CARDINALITY_VIOLATION",
       messageParameters = Map.empty)
   }
+
+  def unsupportedPurgePartitionError(): SparkUnsupportedOperationException = {
+    new SparkUnsupportedOperationException(
+      errorClass = "UNSUPPORTED_FEATURE.PURGE_PARTITION",
+      messageParameters = Map.empty)
+  }
+
+  def unsupportedPurgeTableError(): SparkUnsupportedOperationException = {
+    new SparkUnsupportedOperationException(
+      errorClass = "UNSUPPORTED_FEATURE.PURGE_TABLE",
+      messageParameters = Map.empty)
+  }
 }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagementSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagementSuite.scala
index 90ed106d8ed1b..4d25fda92ec1e 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagementSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsAtomicPartitionManagementSuite.scala
@@ -19,7 +19,7 @@ package org.apache.spark.sql.connector.catalog
 
 import java.util
 
-import org.apache.spark.SparkFunSuite
+import org.apache.spark.{SparkFunSuite, SparkUnsupportedOperationException}
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.analysis.{NoSuchPartitionException, PartitionsAlreadyExistException}
 import org.apache.spark.sql.connector.expressions.{LogicalExpressions, NamedReference, Transform}
@@ -117,10 +117,13 @@ class SupportsAtomicPartitionManagementSuite extends SparkFunSuite {
     partTable.createPartitions(
       partIdents,
       Array(new util.HashMap[String, String](), new util.HashMap[String, String]()))
-    val errMsg = intercept[UnsupportedOperationException] {
-      partTable.purgePartitions(partIdents)
-    }.getMessage
-    assert(errMsg.contains("purge is not supported"))
+    checkError(
+      exception = intercept[SparkUnsupportedOperationException] {
+        partTable.purgePartitions(partIdents)
+      },
+      errorClass = "UNSUPPORTED_FEATURE.PURGE_PARTITION",
+      parameters = Map.empty
+    )
   }
 
   test("dropPartitions failed if partition not exists") {
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsPartitionManagementSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsPartitionManagementSuite.scala
index 40114d063aada..501f363d7dc6b 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsPartitionManagementSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/SupportsPartitionManagementSuite.scala
@@ -21,7 +21,7 @@ import java.util
 
 import scala.collection.JavaConverters._
 
-import org.apache.spark.SparkFunSuite
+import org.apache.spark.{SparkFunSuite, SparkUnsupportedOperationException}
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.analysis.{NoSuchPartitionException, PartitionsAlreadyExistException}
 import org.apache.spark.sql.connector.expressions.{LogicalExpressions, NamedReference, Transform}
@@ -89,10 +89,13 @@ class SupportsPartitionManagementSuite extends SparkFunSuite {
     val table = catalog.loadTable(ident)
     val partTable = new InMemoryPartitionTable(
       table.name(), table.schema(), table.partitioning(), table.properties())
-    val errMsg = intercept[UnsupportedOperationException] {
-      partTable.purgePartition(InternalRow.apply("3"))
-    }.getMessage
-    assert(errMsg.contains("purge is not supported"))
+    checkError(
+      exception = intercept[SparkUnsupportedOperationException] {
+        partTable.purgePartition(InternalRow.apply("3"))
+      },
+      errorClass = "UNSUPPORTED_FEATURE.PURGE_PARTITION",
+      parameters = Map.empty
+    )
   }
 
   test("replacePartitionMetadata") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableAddPartitionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableAddPartitionSuite.scala
index d41fd6b00f8aa..71f04159638aa 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableAddPartitionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableAddPartitionSuite.scala
@@ -39,11 +39,15 @@ trait AlterTableAddPartitionSuiteBase extends command.AlterTableAddPartitionSuit
   test("empty string as partition value") {
     withNamespaceAndTable("ns", "tbl") { t =>
       sql(s"CREATE TABLE $t (col1 INT, p1 STRING) $defaultUsing PARTITIONED BY (p1)")
-      val errMsg = intercept[AnalysisException] {
-        sql(s"ALTER TABLE $t ADD PARTITION (p1 = '')")
-      }.getMessage
-      assert(errMsg.contains("Partition spec is invalid. " +
-        "The spec ([p1=]) contains an empty partition column value"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"ALTER TABLE $t ADD PARTITION (p1 = '')")
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1076",
+        parameters = Map(
+          "details" -> "The spec ([p1=]) contains an empty partition column value"
+        )
+      )
     }
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala
index cc57e10a16887..8d403429ca5d2 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala
@@ -79,11 +79,13 @@ class AlterTableDropPartitionSuite
   test("empty string as partition value") {
     withNamespaceAndTable("ns", "tbl") { t =>
       sql(s"CREATE TABLE $t (col1 INT, p1 STRING) $defaultUsing PARTITIONED BY (p1)")
-      val errMsg = intercept[AnalysisException] {
-        sql(s"ALTER TABLE $t DROP PARTITION (p1 = '')")
-      }.getMessage
-      assert(errMsg.contains("Partition spec is invalid. " +
-        "The spec ([p1=]) contains an empty partition column value"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"ALTER TABLE $t DROP PARTITION (p1 = '')")
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1076",
+        parameters = Map("details" -> "The spec ([p1=]) contains an empty partition column value")
+      )
     }
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableRenameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableRenameSuite.scala
index 3efd6d8a95755..dfbdc6a4ca78e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableRenameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableRenameSuite.scala
@@ -37,10 +37,13 @@ trait AlterTableRenameSuiteBase extends command.AlterTableRenameSuiteBase with Q
       sql(s"CREATE NAMESPACE $catalog.src_ns")
       val src = dst.replace("dst", "src")
       sql(s"CREATE TABLE $src (c0 INT) $defaultUsing")
-      val errMsg = intercept[AnalysisException] {
-        sql(s"ALTER TABLE $src RENAME TO dst_ns.dst_tbl")
-      }.getMessage
-      assert(errMsg.contains("source and destination databases do not match"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"ALTER TABLE $src RENAME TO dst_ns.dst_tbl")
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1073",
+        parameters = Map("db" -> "src_ns", "newDb" -> "dst_ns")
+      )
     }
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableSetLocationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableSetLocationSuite.scala
index d0f1a83594284..53b9853f36c8c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableSetLocationSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableSetLocationSuite.scala
@@ -89,10 +89,13 @@ trait AlterTableSetLocationSuiteBase extends command.AlterTableSetLocationSuiteB
         checkLocation(tableIdent, new URI("/path/to/part/ways2"), Some(partSpec))
       }
       withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
-        val e = intercept[AnalysisException] {
-          sql(s"ALTER TABLE $t PARTITION (A='1', B='2') SET LOCATION '/path/to/part/ways3'")
-        }.getMessage
-        assert(e.contains("not a valid partition column"))
+        checkError(
+          exception = intercept[AnalysisException] {
+            sql(s"ALTER TABLE $t PARTITION (A='1', B='2') SET LOCATION '/path/to/part/ways3'")
+          },
+          errorClass = "_LEGACY_ERROR_TEMP_1231",
+          parameters = Map("key" -> "A", "tblName" -> "`spark_catalog`.`ns`.`tbl`")
+        )
       }
 
       sessionCatalog.setCurrentDatabase("ns")
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowCreateTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowCreateTableSuite.scala
index b9fcf76ad7cdc..36fde23db5c03 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowCreateTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowCreateTableSuite.scala
@@ -158,11 +158,13 @@ trait ShowCreateTableSuiteBase extends command.ShowCreateTableSuiteBase
         """.stripMargin
       )
 
-      val cause = intercept[AnalysisException] {
-        getShowCreateDDL(t, true)
-      }
-
-      assert(cause.getMessage.contains("Use `SHOW CREATE TABLE` without `AS SERDE` instead"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          getShowCreateDDL(t, true)
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1274",
+        parameters = Map("table" -> "`spark_catalog`.`ns1`.`tbl`")
+      )
     }
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala
index 4db42f1d7202d..5bda7d002dc51 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala
@@ -53,10 +53,13 @@ trait ShowTablesSuiteBase extends command.ShowTablesSuiteBase with command.Tests
   }
 
   test("only support single-level namespace") {
-    val errMsg = intercept[AnalysisException] {
-      runShowTablesSql("SHOW TABLES FROM a.b", Seq())
-    }.getMessage
-    assert(errMsg.contains("Nested databases are not supported by v1 session catalog: a.b"))
+    checkError(
+      exception = intercept[AnalysisException] {
+        runShowTablesSql("SHOW TABLES FROM a.b", Seq())
+      },
+      errorClass = "_LEGACY_ERROR_TEMP_1126",
+      parameters = Map("catalog" -> "a.b")
+    )
   }
 
   test("SHOW TABLE EXTENDED from default") {
@@ -96,10 +99,13 @@ trait ShowTablesSuiteBase extends command.ShowTablesSuiteBase with command.Tests
     Seq(
       s"SHOW TABLES IN $catalog",
      s"SHOW TABLE EXTENDED IN $catalog LIKE '*tbl'").foreach { showTableCmd =>
-      val errMsg = intercept[AnalysisException] {
-        sql(showTableCmd)
-      }.getMessage
-      assert(errMsg.contains("Database from v1 session catalog is not specified"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(showTableCmd)
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1125",
+        parameters = Map.empty
+      )
     }
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/TruncateTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/TruncateTableSuite.scala
index 7da03db6f7371..cd0a057284705 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/TruncateTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/TruncateTableSuite.scala
@@ -195,10 +195,13 @@ class TruncateTableSuite extends TruncateTableSuiteBase with CommandSuiteBase {
     withNamespaceAndTable("ns", "tbl") { t =>
       (("a", "b") :: Nil).toDF().write.parquet(tempDir.getCanonicalPath)
       sql(s"CREATE TABLE $t $defaultUsing LOCATION '${tempDir.toURI}'")
-      val errMsg = intercept[AnalysisException] {
-        sql(s"TRUNCATE TABLE $t")
-      }.getMessage
-      assert(errMsg.contains("Operation not allowed: TRUNCATE TABLE on external tables"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"TRUNCATE TABLE $t")
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1266",
+        parameters = Map("tableIdentWithDB" -> "`spark_catalog`.`ns`.`tbl`")
+      )
     }
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableDropPartitionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableDropPartitionSuite.scala
index f2d9099025717..2df7eebaecc81 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableDropPartitionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableDropPartitionSuite.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.sql.execution.command.v2
 
+import org.apache.spark.SparkUnsupportedOperationException
 import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
 import org.apache.spark.sql.catalyst.util.quoteIdentifier
@@ -56,10 +57,13 @@ class AlterTableDropPartitionSuite
       sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)")
       sql(s"ALTER TABLE $t ADD PARTITION (id=1)")
       try {
-        val errMsg = intercept[UnsupportedOperationException] {
-          sql(s"ALTER TABLE $t DROP PARTITION (id=1) PURGE")
-        }.getMessage
-        assert(errMsg.contains("purge is not supported"))
+        checkError(
+          exception = intercept[SparkUnsupportedOperationException] {
+            sql(s"ALTER TABLE $t DROP PARTITION (id=1) PURGE")
+          },
+          errorClass = "UNSUPPORTED_FEATURE.PURGE_PARTITION",
+          parameters = Map.empty
+        )
       } finally {
         sql(s"ALTER TABLE $t DROP PARTITION (id=1)")
       }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableRecoverPartitionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableRecoverPartitionsSuite.scala
index a44e346d0348c..ff6ff0df5306a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableRecoverPartitionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableRecoverPartitionsSuite.scala
@@ -31,10 +31,13 @@ class AlterTableRecoverPartitionsSuite
   test("partition recovering of v2 tables is not supported") {
     withNamespaceAndTable("ns", "tbl") { t =>
       spark.sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing")
-      val errMsg = intercept[AnalysisException] {
-        sql(s"ALTER TABLE $t RECOVER PARTITIONS")
-      }.getMessage
-      assert(errMsg.contains("ALTER TABLE ... RECOVER PARTITIONS is not supported for v2 tables"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"ALTER TABLE $t RECOVER PARTITIONS")
+        },
+        errorClass = "NOT_SUPPORTED_COMMAND_FOR_V2_TABLE",
+        parameters = Map("cmd" -> "ALTER TABLE ... RECOVER PARTITIONS")
+      )
     }
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableSetLocationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableSetLocationSuite.scala
index babd3bb3714f2..0ac35452b60a5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableSetLocationSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableSetLocationSuite.scala
@@ -56,11 +56,13 @@ class AlterTableSetLocationSuite
     withNamespaceAndTable("ns", "tbl") { t =>
       sql(s"CREATE TABLE $t (id int) USING foo")
 
-      val e = intercept[AnalysisException] {
-        sql(s"ALTER TABLE $t PARTITION(ds='2017-06-10') SET LOCATION 's3://bucket/path'")
-      }
-      assert(e.getMessage.contains(
-        "ALTER TABLE SET LOCATION does not support partition for v2 tables"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"ALTER TABLE $t PARTITION(ds='2017-06-10') SET LOCATION 's3://bucket/path'")
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1045",
+        parameters = Map.empty
+      )
     }
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/DropTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/DropTableSuite.scala
index 9c9b7d3049c7a..83bded7ab4f52 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/DropTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/DropTableSuite.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.sql.execution.command.v2
 
+import org.apache.spark.SparkUnsupportedOperationException
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.connector.InMemoryTableSessionCatalog
 import org.apache.spark.sql.execution.command
@@ -29,11 +30,14 @@ class DropTableSuite extends command.DropTableSuiteBase with CommandSuiteBase {
   test("purge option") {
     withNamespaceAndTable("ns", "tbl") { t =>
      createTable(t)
-      val errMsg = intercept[UnsupportedOperationException] {
-        sql(s"DROP TABLE $catalog.ns.tbl PURGE")
-      }.getMessage
       // The default TableCatalog.purgeTable implementation throws an exception.
-      assert(errMsg.contains("Purge table is not supported"))
+      checkError(
+        exception = intercept[SparkUnsupportedOperationException] {
+          sql(s"DROP TABLE $catalog.ns.tbl PURGE")
+        },
+        errorClass = "UNSUPPORTED_FEATURE.PURGE_TABLE",
+        parameters = Map.empty
+      )
     }
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/MsckRepairTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/MsckRepairTableSuite.scala
index d4b23e50786eb..381e55b49393c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/MsckRepairTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/MsckRepairTableSuite.scala
@@ -32,10 +32,13 @@ class MsckRepairTableSuite
   test("repairing of v2 tables is not supported") {
     withNamespaceAndTable("ns", "tbl") { t =>
       spark.sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing")
-      val errMsg = intercept[AnalysisException] {
-        sql(s"MSCK REPAIR TABLE $t")
-      }.getMessage
-      assert(errMsg.contains("MSCK REPAIR TABLE is not supported for v2 tables"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"MSCK REPAIR TABLE $t")
+        },
+        errorClass = "NOT_SUPPORTED_COMMAND_FOR_V2_TABLE",
+        parameters = Map("cmd" -> "MSCK REPAIR TABLE")
+      )
     }
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala
index e7e5c71c9ef0a..9a67eab055e78 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala
@@ -84,10 +84,13 @@ class ShowTablesSuite extends command.ShowTablesSuiteBase with CommandSuiteBase
     val table = "people"
     withTable(s"$catalog.$table") {
       sql(s"CREATE TABLE $catalog.$table (name STRING, id INT) $defaultUsing")
-      val errMsg = intercept[AnalysisException] {
-        sql(s"SHOW TABLE EXTENDED FROM $catalog LIKE '*$table*'").collect()
-      }.getMessage
-      assert(errMsg.contains("SHOW TABLE EXTENDED is not supported for v2 tables"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"SHOW TABLE EXTENDED FROM $catalog LIKE '*$table*'").collect()
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1200",
+        parameters = Map("name" -> "SHOW TABLE EXTENDED")
+      )
     }
   }
 
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowCreateTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowCreateTableSuite.scala
index 55a27f336db40..5f8f250f8e9a8 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowCreateTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowCreateTableSuite.scala
@@ -357,11 +357,17 @@ class ShowCreateTableSuite extends v1.ShowCreateTableSuiteBase with CommandSuite
         """.stripMargin
       )
 
-      val cause = intercept[AnalysisException] {
-        checkCreateSparkTableAsHive("t1")
-      }
-
-      assert(cause.getMessage.contains("unsupported serde configuration"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          checkCreateSparkTableAsHive("t1")
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1273",
+        parameters = Map(
+          "table" -> "t1",
+          "configs" -> (" SERDE: org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe " +
+            "INPUTFORMAT: org.apache.hadoop.hive.ql.io.RCFileInputFormat " +
+            "OUTPUTFORMAT: org.apache.hadoop.hive.ql.io.RCFileOutputFormat"))
+      )
     }
   }
 
@@ -423,13 +429,13 @@ class ShowCreateTableSuite extends v1.ShowCreateTableSuiteBase with CommandSuite
         """.stripMargin
       )
 
-
-      val cause = intercept[AnalysisException] {
-        sql("SHOW CREATE TABLE t1")
-      }
-
-      assert(cause.getMessage.contains(
-        "SHOW CREATE TABLE doesn't support transactional Hive table"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql("SHOW CREATE TABLE t1")
+        },
+        errorClass = "_LEGACY_ERROR_TEMP_1272",
+        parameters = Map("table" -> "`spark_catalog`.`default`.`t1`")
+      )
     }
   }
 }
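
The migration applied across all of the suites above follows one pattern: replace a substring assertion on `getMessage` with a structural `checkError()` assertion against an error class. Below is a minimal, self-contained sketch of that pattern; the suite name `PurgeErrorCheckSuite` is hypothetical, while `checkError()` (inherited from `SparkFunSuite`) is the real test helper the suites above use, and `QueryExecutionErrors.unsupportedPurgeTableError()` is the helper this patch adds.

```scala
import org.apache.spark.{SparkFunSuite, SparkUnsupportedOperationException}
import org.apache.spark.sql.errors.QueryExecutionErrors

// Hypothetical suite illustrating the before/after of this migration.
class PurgeErrorCheckSuite extends SparkFunSuite {

  test("purge failures are reported via a stable error class") {
    // Before: match on free-form message text, which breaks whenever
    // the wording changes:
    //   val errMsg = intercept[UnsupportedOperationException] { ... }.getMessage
    //   assert(errMsg.contains("Purge table is not supported"))
    //
    // After: assert on the error class and parameters carried by the
    // thrown SparkThrowable, independent of the rendered message text:
    checkError(
      exception = intercept[SparkUnsupportedOperationException] {
        throw QueryExecutionErrors.unsupportedPurgeTableError()
      },
      errorClass = "UNSUPPORTED_FEATURE.PURGE_TABLE",
      parameters = Map.empty)
  }
}
```

Asserting on the error class and its parameters rather than on message text keeps the tests stable when the user-facing wording changes, which is the point of moving these messages into error-classes.json.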