From 42aca3d2c02cccf168fcac97a5d3f594f21b34ab Mon Sep 17 00:00:00 2001 From: fjh100456 Date: Fri, 15 Sep 2017 18:30:20 +0800 Subject: [PATCH] [SPARK-21786][SQL] The 'spark.sql.parquet.compression.codec' configuration doesn't take effect on tables with partition field(s) Fix the test's expected-path construction: build the table directory path from tmpDir.getPath instead of tmpDir.toURI.toString, so the filesystem path matches the actual on-disk location (a file URI string such as "file:/tmp/..." is not a valid java.io.File path). --- .../src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala index 90969d7c87b64..715a018ed5569 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala @@ -779,7 +779,7 @@ class InsertSuite extends QueryTest with TestHiveSingleton with BeforeAndAfter def getTableSize(tableName: String, codec: String, isPartitioned: Boolean = false): Long = { insertOverwriteTable(tableName, codec, isPartitioned) - val path = s"${tmpDir.toURI.toString.stripSuffix("/")}/$tableName" + val path = s"${tmpDir.getPath.stripSuffix("/")}/$tableName" val dir = new File(path) val files = getDirFiles(dir).filter(_.getName.startsWith("part-")) files.map(_.length()).sum