This repository has been archived by the owner on Sep 18, 2023. It is now read-only.

Commit

remove testing config
rui-mo committed Apr 22, 2021
1 parent 85b5cb7 commit b1887a1
Showing 59 changed files with 74 additions and 84 deletions.
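In short, the commit does three things: it deletes the isTesting helper from ColumnarPluginConfig, drops the short-circuit in ColumnarGuardRule that unconditionally rejected ColumnarBatchScanExec whenever the flag was set, and switches every affected test suite from spark.oap.sql.columnar.testing=true to spark.oap.sql.columnar.batchscan=false, reaching the same end state through the regular feature switch.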
@@ -60,17 +60,11 @@ case class ColumnarGuardRule(conf: SparkConf) extends Rule[SparkPlan] {
val enableColumnarShuffledHashJoin = columnarConf.enableColumnarShuffledHashJoin
val enableColumnarBroadcastExchange = columnarConf.enableColumnarBroadcastExchange
val enableColumnarBroadcastJoin = columnarConf.enableColumnarBroadcastJoin

-val testing = columnarConf.isTesting
-
private def tryConvertToColumnar(plan: SparkPlan): Boolean = {
try {
val columnarPlan = plan match {
case plan: BatchScanExec =>
-if (testing) {
-  // disable ColumnarBatchScanExec according to config
-  return false
-}
if (!enableColumnarBatchScan) return false
new ColumnarBatchScanExec(plan.output, plan.scan)
case plan: FileSourceScanExec =>
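With the testing short-circuit gone, the BatchScanExec arm is gated only by the batchscan switch. A minimal sketch of the resulting guard logic; only the match arm appears in the hunk, so the surrounding cases and the error handling here are assumptions based on the visible context:

    private def tryConvertToColumnar(plan: SparkPlan): Boolean = {
      try {
        val columnarPlan = plan match {
          case plan: BatchScanExec =>
            // The feature switch is now the only gate for columnar batch scan.
            if (!enableColumnarBatchScan) return false
            new ColumnarBatchScanExec(plan.output, plan.scan)
          case plan: FileSourceScanExec =>
            // ... remaining operator checks elided ...
            plan
        }
        true
      } catch {
        case _: Throwable => false
      }
    }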
@@ -143,10 +143,6 @@ class ColumnarPluginConfig(conf: SQLConf) extends Logging {
// The supported customized compression codec is lz4 and fastpfor.
val columnarShuffleUseCustomizedCompressionCodec: String =
conf.getConfString("spark.oap.sql.columnar.shuffle.customizedCompression.codec", "lz4")

-// a helper flag to check if it's in unit test
-val isTesting: Boolean =
-  conf.getConfString("spark.oap.sql.columnar.testing", "false").toBoolean
-
val numaBindingInfo: ColumnarNumaBindingInfo = {
val enableNumaBinding: Boolean =
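The suites now rely on the batch-scan switch instead. Its declaration is not part of this diff; presumably it follows the same getConfString pattern as the removed flag, so the following is an assumption, default value included:

    // Assumed declaration in ColumnarPluginConfig; the "true" default is a guess.
    val enableColumnarBatchScan: Boolean =
      conf.getConfString("spark.oap.sql.columnar.batchscan", "true").toBoolean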
@@ -69,7 +69,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils
.set("spark.sql.parquet.enableVectorizedReader", "false")
.set("spark.sql.orc.enableVectorizedReader", "false")
.set("spark.sql.inMemoryColumnarStorage.enableVectorizedReader", "false")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

setupTestData()

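Every remaining file in the commit makes the same one-line substitution in its suite configuration. Written out once, the pattern looks roughly like this; the sparkConf override shape follows Spark's shared-session test convention and is an assumption, since only the .set chain appears in the hunks:

    import org.apache.spark.SparkConf

    override protected def sparkConf: SparkConf =
      super.sparkConf
        .set("spark.sql.parquet.enableVectorizedReader", "false")
        .set("spark.sql.orc.enableVectorizedReader", "false")
        .set("spark.sql.inMemoryColumnarStorage.enableVectorizedReader", "false")
        // was: .set("spark.oap.sql.columnar.testing", "true")
        .set("spark.oap.sql.columnar.batchscan", "false")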
@@ -56,7 +56,7 @@ class DataFrameJoinSuite extends QueryTest
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

test("join - join using") {
val df = Seq(1, 2, 3).map(i => (i, i.toString)).toDF("int", "str")
@@ -74,7 +74,7 @@ class DataFrameSuite extends QueryTest
.set("spark.sql.parquet.enableVectorizedReader", "false")
.set("spark.sql.orc.enableVectorizedReader", "false")
.set("spark.sql.inMemoryColumnarStorage.enableVectorizedReader", "false")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

test("analysis error should be eagerly reported") {
intercept[Exception] { testData.select("nonExistentName") }
@@ -61,7 +61,7 @@ class DataFrameWriterV2Suite extends QueryTest with SharedSparkSession with Befo
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

private def catalog(name: String): TableCatalog = {
spark.sessionState.catalogManager.catalog(name).asTableCatalog
@@ -55,7 +55,7 @@ class DatasetCacheSuite extends QueryTest
.set("spark.sql.parquet.enableVectorizedReader", "false")
.set("spark.sql.orc.enableVectorizedReader", "false")
.set("spark.sql.inMemoryColumnarStorage.enableVectorizedReader", "false")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

/**
* Asserts that a cached [[Dataset]] will be built using the given number of other cached results.
@@ -45,7 +45,7 @@ class DeprecatedAPISuite extends QueryTest with SharedSparkSession {
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

private lazy val doubleData = (1 to 10).map(i => DoubleData(i * 0.2 - 1, i * -0.2 + 1)).toDF()

@@ -94,7 +94,7 @@ class ExplainSuite extends ExplainSuiteHelper with DisableAdaptiveExecutionSuite
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

test("SPARK-23034 show rdd names in RDD scan nodes (Dataset)") {
val rddWithName = spark.sparkContext.parallelize(Row(1, "abc") :: Nil).setName("testRdd")
@@ -389,7 +389,7 @@ class ExplainSuiteAE extends ExplainSuiteHelper with EnableAdaptiveExecutionSuit
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

ignore("Explain formatted") {
val df1 = Seq((1, 2), (2, 3)).toDF("k", "v1")
@@ -66,7 +66,7 @@ class FileBasedDataSourceSuite extends QueryTest
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")
.set("spark.sql.parquet.enableVectorizedReader", "false")
.set("spark.sql.orc.enableVectorizedReader", "false")
.set("spark.sql.inMemoryColumnarStorage.enableVectorizedReader", "false")
@@ -51,7 +51,7 @@ class JoinHintSuite extends PlanTest with SharedSparkSession with AdaptiveSparkP
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

lazy val df = spark.range(10)
lazy val df1 = df.selectExpr("id as a1", "id as a2")
@@ -69,7 +69,7 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark
.set("spark.sql.parquet.enableVectorizedReader", "false")
.set("spark.sql.orc.enableVectorizedReader", "false")
.set("spark.sql.inMemoryColumnarStorage.enableVectorizedReader", "false")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

setupTestData()

@@ -65,7 +65,7 @@ class StatisticsCollectionSuite extends StatisticsCollectionTestBase with Shared
.set("spark.sql.parquet.enableVectorizedReader", "false")
.set("spark.sql.orc.enableVectorizedReader", "false")
.set("spark.sql.inMemoryColumnarStorage.enableVectorizedReader", "false")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

test("estimates the size of a limit 0 on outer join") {
withTempView("test") {
@@ -50,7 +50,7 @@ class SubquerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")
.set("spark.oap.sql.columnar.hashCompare", "true")

setupTestData()
@@ -57,7 +57,7 @@ class UDFSuite extends QueryTest with SharedSparkSession {
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

test("built-in fixed arity expressions") {
val df = spark.emptyDataFrame
@@ -146,7 +146,7 @@ class UserDefinedTypeSuite extends QueryTest with SharedSparkSession with Parque
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

private lazy val pointsRDD = Seq(
MyLabeledPoint(1.0, new TestUDT.MyDenseVector(Array(0.1, 1.0))),
@@ -55,7 +55,7 @@ class DataSourceV2DataFrameSessionCatalogSuite
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

override protected def doInsert(tableName: String, insert: DataFrame, mode: SaveMode): Unit = {
val dfw = insert.write.format(v2Format)
@@ -52,7 +52,7 @@ class DataSourceV2DataFrameSuite
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

before {
spark.conf.set("spark.sql.catalog.testcat", classOf[InMemoryTableCatalog].getName)
@@ -45,7 +45,7 @@ class DataSourceV2SQLSessionCatalogSuite
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

override protected val catalogAndNamespace = ""

@@ -58,7 +58,7 @@ class DataSourceV2SQLSuite
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

private val v2Source = classOf[FakeV2Provider].getName
override protected val v2Format = v2Source
@@ -65,7 +65,7 @@ class DataSourceV2Suite extends QueryTest with SharedSparkSession with AdaptiveS
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

private def getBatch(query: DataFrame): AdvancedBatch = {
query.queryExecution.executedPlan.collect {
@@ -106,7 +106,7 @@ class FileDataSourceV2FallBackSuite extends QueryTest with SharedSparkSession {
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

test("Fall back to v1 when writing to file with read only FileDataSourceV2") {
val df = spark.range(10).toDF()
@@ -61,7 +61,7 @@ abstract class InsertIntoTests(
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

/**
* Insert data into a table using the insertInto statement. Implementations can be in SQL
@@ -58,7 +58,7 @@ class SupportsCatalogOptionsSuite extends QueryTest with SharedSparkSession with
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

private val catalogName = "testcat"
private val format = classOf[CatalogSupportingInMemoryTableProvider].getName
@@ -91,7 +91,7 @@ class DataSourceScanExecRedactionSuite extends DataSourceScanRedactionTest {
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")
.set(SQLConf.USE_V1_SOURCE_LIST.key, "orc")

override protected def getRootPath(df: DataFrame): Path =
@@ -162,7 +162,7 @@ class DataSourceV2ScanExecRedactionSuite extends DataSourceScanRedactionTest {
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")
.set(SQLConf.USE_V1_SOURCE_LIST.key, "")

override protected def getRootPath(df: DataFrame): Path =
@@ -47,7 +47,7 @@ class SimpleSQLViewSuite extends SQLViewSuite with SharedSparkSession {
.set("spark.sql.parquet.enableVectorizedReader", "false")
.set("spark.sql.orc.enableVectorizedReader", "false")
.set("spark.sql.inMemoryColumnarStorage.enableVectorizedReader", "false")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")
}

/**
@@ -64,7 +64,7 @@ class InMemoryCatalogedDDLSuite extends DDLSuite with SharedSparkSession {
.set("spark.sql.parquet.enableVectorizedReader", "false")
.set("spark.sql.orc.enableVectorizedReader", "false")
.set("spark.sql.inMemoryColumnarStorage.enableVectorizedReader", "false")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

override def afterEach(): Unit = {
try {
@@ -51,7 +51,7 @@ class FileFormatWriterSuite
.set("spark.sql.parquet.enableVectorizedReader", "false")
.set("spark.sql.orc.enableVectorizedReader", "false")
.set("spark.sql.inMemoryColumnarStorage.enableVectorizedReader", "false")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

test("empty file should be skipped while write to file") {
withTempPath { path =>
@@ -61,7 +61,7 @@ class FileSourceStrategySuite extends QueryTest with SharedSparkSession with Pre
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")
.set("spark.sql.parquet.enableVectorizedReader", "false")
.set("spark.sql.orc.enableVectorizedReader", "false")
.set("spark.sql.inMemoryColumnarStorage.enableVectorizedReader", "false")
@@ -80,7 +80,7 @@ class CSVReadSchemaSuite
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")
}

class HeaderCSVReadSchemaSuite
@@ -113,7 +113,7 @@ class HeaderCSVReadSchemaSuite
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")
}

class JsonReadSchemaSuite
@@ -149,7 +149,7 @@ class JsonReadSchemaSuite
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")
}

class OrcReadSchemaSuite
@@ -181,7 +181,7 @@ class OrcReadSchemaSuite
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

override def beforeAll(): Unit = {
super.beforeAll()
@@ -226,7 +226,7 @@ class VectorizedOrcReadSchemaSuite
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

override def beforeAll(): Unit = {
super.beforeAll()
@@ -272,7 +272,7 @@ class MergedOrcReadSchemaSuite
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")
.set(SQLConf.ORC_SCHEMA_MERGING_ENABLED.key, "true")
}

@@ -304,7 +304,7 @@ class ParquetReadSchemaSuite
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

override def beforeAll(): Unit = {
super.beforeAll()
@@ -347,7 +347,7 @@ class VectorizedParquetReadSchemaSuite
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

override def beforeAll(): Unit = {
super.beforeAll()
@@ -390,7 +390,7 @@ class MergedParquetReadSchemaSuite
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")

override def beforeAll(): Unit = {
super.beforeAll()
@@ -2363,7 +2363,7 @@ class CSVv1Suite extends CSVSuite {
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")
.set(SQLConf.USE_V1_SOURCE_LIST, "csv")
}

@@ -2387,7 +2387,7 @@ class CSVv2Suite extends CSVSuite {
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")
.set(SQLConf.USE_V1_SOURCE_LIST, "")
}

@@ -2411,6 +2411,6 @@ class CSVLegacyTimeParserSuite extends CSVSuite {
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
.set("spark.oap.sql.columnar.sortmergejoin", "true")
.set("spark.oap.sql.columnar.testing", "true")
.set("spark.oap.sql.columnar.batchscan", "false")
.set(SQLConf.LEGACY_TIME_PARSER_POLICY, "legacy")
}
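Outside the test suites, the same switch can be set on any session, assuming the plugin reads it from the session conf as these suites suggest:

    import org.apache.spark.sql.SparkSession

    // Hypothetical usage: opt a whole application out of columnar batch scan.
    val spark = SparkSession.builder()
      .appName("no-columnar-batchscan")
      .config("spark.oap.sql.columnar.batchscan", "false")
      .getOrCreate()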
