diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java index a3bf9f4431956b0..bc2ffc40efc29d5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java @@ -49,6 +49,7 @@ import org.apache.doris.analysis.RecoverPartitionStmt; import org.apache.doris.analysis.RecoverTableStmt; import org.apache.doris.analysis.SinglePartitionDesc; +import org.apache.doris.analysis.SlotRef; import org.apache.doris.analysis.TableName; import org.apache.doris.analysis.TableRef; import org.apache.doris.analysis.TruncateTableStmt; @@ -69,6 +70,7 @@ import org.apache.doris.catalog.Env; import org.apache.doris.catalog.EnvFactory; import org.apache.doris.catalog.EsTable; +import org.apache.doris.catalog.Function; import org.apache.doris.catalog.HashDistributionInfo; import org.apache.doris.catalog.HiveTable; import org.apache.doris.catalog.Index; @@ -2035,6 +2037,17 @@ public void checkAvailableCapacity(Database db) throws DdlException { db.checkQuota(); } + private Type getChildTypeByName(String name, CreateTableStmt stmt) + throws AnalysisException { + List<Column> columns = stmt.getColumns(); + for (Column col : columns) { + if (col.nameEquals(name, false)) { + return col.getType(); + } + } + throw new AnalysisException("Cannot find column `" + name + "` in table's columns"); + } + + // Create olap table and related base index synchronously. 
private void createOlapTable(Database db, CreateTableStmt stmt) throws UserException { String tableName = stmt.getTableName(); @@ -2079,6 +2092,40 @@ private void createOlapTable(Database db, CreateTableStmt stmt) throws UserExcep // create partition info PartitionDesc partitionDesc = stmt.getPartitionDesc(); + + // check legality of partition exprs + ConnectContext ctx = ConnectContext.get(); + Env env = Env.getCurrentEnv(); + if (ctx != null && env != null && partitionDesc != null && partitionDesc.getPartitionExprs() != null) { + for (Expr expr : partitionDesc.getPartitionExprs()) { + if (expr != null && expr instanceof FunctionCallExpr) { // validate each function-call partition expr + FunctionCallExpr func = (FunctionCallExpr) expr; + ArrayList<Expr> children = func.getChildren(); + Type[] childTypes = new Type[children.size()]; + for (int i = 0; i < children.size(); i++) { + if (children.get(i) instanceof LiteralExpr) { + childTypes[i] = children.get(i).getType(); + } else if (children.get(i) instanceof SlotRef) { + childTypes[i] = getChildTypeByName(children.get(i).getExprName(), stmt); + } else { + throw new AnalysisException(String.format( + "partition expr %s has unrecognized parameter in slot %d", func.getExprName(), i)); + } + } + Function fn = null; + try { + fn = func.getBuiltinFunction(func.getFnName().getFunction(), childTypes, + Function.CompareMode.IS_INDISTINGUISHABLE); // existence check only; fn itself is not used + } catch (Exception e) { + throw new AnalysisException("partition expr " + func.getExprName() + " is illegal!"); + } + if (fn == null) { + throw new AnalysisException("partition expr " + func.getExprName() + " is illegal!"); + } + } + } + } + PartitionInfo partitionInfo = null; Map<String, Long> partitionNameToId = Maps.newHashMap(); if (partitionDesc != null) { @@ -2750,7 +2797,6 @@ private void createOlapTable(Database db, CreateTableStmt stmt) throws UserExcep } throw t; } - } } diff --git a/regression-test/suites/partition_p0/auto_partition/test_auto_list_partition.groovy 
b/regression-test/suites/partition_p0/auto_partition/test_auto_list_partition.groovy index f90f0251705cb1a..af70ca35a877866 100644 --- a/regression-test/suites/partition_p0/auto_partition/test_auto_list_partition.groovy +++ b/regression-test/suites/partition_p0/auto_partition/test_auto_list_partition.groovy @@ -287,10 +287,6 @@ suite("test_auto_list_partition") { """ sql """ insert into stream_load_list_test_table_string_key values (1,"20"), (2," ");""" sql """ insert into stream_load_list_test_table_string_key values (3,"!"), (4,"! ");""" - test { - sql """ insert into stream_load_list_test_table_string_key values (5,"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaA")""" - exception "Partition name's length is over limit of 50." 
- } result12 = sql "show partitions from stream_load_list_test_table_string_key" logger.info("${result12}") assertEquals(result12.size(), 4) diff --git a/regression-test/suites/partition_p0/auto_partition/test_auto_partition_behavior.groovy b/regression-test/suites/partition_p0/auto_partition/test_auto_partition_behavior.groovy index af37cf8cf08c3de..02623bb5c974aab 100644 --- a/regression-test/suites/partition_p0/auto_partition/test_auto_partition_behavior.groovy +++ b/regression-test/suites/partition_p0/auto_partition/test_auto_partition_behavior.groovy @@ -256,8 +256,8 @@ suite("test_auto_partition_behavior") { } // PROHIBIT different timeunit of interval when use both auto & dynamic partition + sql "set experimental_enable_nereids_planner=true;" test{ - sql "set experimental_enable_nereids_planner=true;" sql """ CREATE TABLE tbl3 ( @@ -280,8 +280,9 @@ suite("test_auto_partition_behavior") { """ exception "If support auto partition and dynamic partition at same time, they must have the same interval unit." } + + sql "set experimental_enable_nereids_planner=false;" test{ - sql "set experimental_enable_nereids_planner=false;" sql """ CREATE TABLE tbl3 ( @@ -324,4 +325,53 @@ suite("test_auto_partition_behavior") { exception "Partition name's length is over limit of 50." 
} + + // illegal partition definition + sql "set experimental_enable_nereids_planner=false;" + test{ + sql """ + create table illegal( + k0 datetime(6) NOT null, + k1 datetime(6) NOT null + ) + auto partition by range date_trunc(k0, k1, 'hour') + ( + ) + DISTRIBUTED BY HASH(`k0`) BUCKETS 2 + properties("replication_num" = "1"); + """ + exception "auto create partition only support one slotRef in function expr" + } + + sql "set experimental_enable_nereids_planner=true;" + sql "set enable_fallback_to_original_planner=false;" + test{ + sql """ + create table illegal( + k0 datetime(6) NOT null, + k1 datetime(6) NOT null + ) + auto partition by range date_trunc(k0, k1, 'hour') + ( + ) + DISTRIBUTED BY HASH(`k0`) BUCKETS 2 + properties("replication_num" = "1"); + """ + exception "partition expr date_trunc is illegal!" + } + // test displacement of partition function + test{ + sql """ + create table illegal( + k0 datetime(6) NOT null, + k1 int NOT null + ) + auto partition by range date_trunc(k1, 'hour') + ( + ) + DISTRIBUTED BY HASH(`k0`) BUCKETS 2 + properties("replication_num" = "1"); + """ + exception "partition expr date_trunc is illegal!" 
+ } } diff --git a/regression-test/suites/partition_p1/auto_partition/sql/multi_thread_load.groovy b/regression-test/suites/partition_p1/auto_partition/sql/multi_thread_load.groovy index 4dcec2c91cfef77..f515c25b0bec114 100644 --- a/regression-test/suites/partition_p1/auto_partition/sql/multi_thread_load.groovy +++ b/regression-test/suites/partition_p1/auto_partition/sql/multi_thread_load.groovy @@ -74,7 +74,6 @@ suite("multi_thread_load", "p1,nonConcurrent") { // stress case should use resou def sout = new StringBuilder(), serr = new StringBuilder() proc.consumeProcessOutput(sout, serr) proc.waitForOrKill(7200000) - // logger.info("std out: " + sout + "std err: " + serr) } } @@ -152,10 +151,6 @@ suite("multi_thread_load", "p1,nonConcurrent") { // stress case should use resou proc.waitForOrKill(600000) // 10 minutes } - // for (int i = 0; i < data_count; i++) { - // logger.info("try to run " + i + " : " + cm_list[i]) - // load_threads.add(Thread.startDaemon{concurrent_load(cm_list[i])}) - // } load_threads.add(Thread.startDaemon{concurrent_load(cm_list[0])}) load_threads.add(Thread.startDaemon{concurrent_load(cm_list[1])}) load_threads.add(Thread.startDaemon{concurrent_load(cm_list[2])}) diff --git a/regression-test/suites/partition_p2/auto_partition/high_concur_load/stress_test_high_concurrency_load.groovy b/regression-test/suites/partition_p2/auto_partition/high_concur_load/stress_test_high_concurrency_load.groovy index 287b0dc00e3505a..abe71301500427a 100644 --- a/regression-test/suites/partition_p2/auto_partition/high_concur_load/stress_test_high_concurrency_load.groovy +++ b/regression-test/suites/partition_p2/auto_partition/high_concur_load/stress_test_high_concurrency_load.groovy @@ -179,12 +179,12 @@ suite("stress_test_high_concurrency_load") { } def row_count_range = sql """select count(*) from ${tb_name2};""" - assertTrue(cur_rows * data_count == row_count_range[0][0]) + assertTrue(cur_rows * data_count == row_count_range[0][0], "${cur_rows * data_count}, 
${row_count_range[0][0]}") def partition_res_range = sql """show partitions from ${tb_name2} order by PartitionName;""" for (int i = 0; i < partition_res_range.size(); i++) { for (int j = i+1; j < partition_res_range.size(); j++) { if (partition_res_range[i][6] == partition_res_range[j][6]) { - assertTrue(false) + assertTrue(false, "$i, $j") } } }