
Commit

[fix](nereids) forbid create table with illegal auto partition expr (a…
zclllyybb authored and pull[bot] committed May 8, 2024
1 parent acd2bfd commit 4019213
Showing 5 changed files with 101 additions and 14 deletions.
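The added check rejects auto-partition expressions that cannot be resolved to a builtin function for the argument types of their children. For example (taken from the regression tests added below), a date_trunc call over two column slots now fails at CREATE TABLE time:

    create table illegal(
        k0 datetime(6) NOT null,
        k1 datetime(6) NOT null
    )
    auto partition by range date_trunc(k0, k1, 'hour')
    ()
    DISTRIBUTED BY HASH(`k0`) BUCKETS 2
    properties("replication_num" = "1");
    -- under the Nereids planner this now fails with: partition expr date_trunc is illegal!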
@@ -49,6 +49,7 @@
import org.apache.doris.analysis.RecoverPartitionStmt;
import org.apache.doris.analysis.RecoverTableStmt;
import org.apache.doris.analysis.SinglePartitionDesc;
import org.apache.doris.analysis.SlotRef;
import org.apache.doris.analysis.TableName;
import org.apache.doris.analysis.TableRef;
import org.apache.doris.analysis.TruncateTableStmt;
@@ -69,6 +70,7 @@
import org.apache.doris.catalog.Env;
import org.apache.doris.catalog.EnvFactory;
import org.apache.doris.catalog.EsTable;
import org.apache.doris.catalog.Function;
import org.apache.doris.catalog.HashDistributionInfo;
import org.apache.doris.catalog.HiveTable;
import org.apache.doris.catalog.Index;
@@ -2035,6 +2037,17 @@ public void checkAvailableCapacity(Database db) throws DdlException {
db.checkQuota();
}

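    // Find the type of the column named `name` among the columns of the CREATE TABLE
    // statement; used to type-check column-slot arguments of the partition expr.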
private Type getChildTypeByName(String name, CreateTableStmt stmt)
throws AnalysisException {
List<Column> columns = stmt.getColumns();
for (Column col : columns) {
if (col.nameEquals(name, false)) {
return col.getType();
}
}
throw new AnalysisException("Cannot find column `" + name + "` in table's columns");
}

// Create olap table and related base index synchronously.
private void createOlapTable(Database db, CreateTableStmt stmt) throws UserException {
String tableName = stmt.getTableName();
@@ -2079,6 +2092,40 @@ private void createOlapTable(Database db, CreateTableStmt stmt) throws UserExcep

// create partition info
PartitionDesc partitionDesc = stmt.getPartitionDesc();

        // check legality of partition exprs: each function-call expr must resolve to a
        // builtin function using the types of its literal and column-slot children
ConnectContext ctx = ConnectContext.get();
Env env = Env.getCurrentEnv();
if (ctx != null && env != null && partitionDesc != null && partitionDesc.getPartitionExprs() != null) {
for (Expr expr : partitionDesc.getPartitionExprs()) {
                if (expr instanceof FunctionCallExpr) { // validate the partition function call
FunctionCallExpr func = (FunctionCallExpr) expr;
ArrayList<Expr> children = func.getChildren();
Type[] childTypes = new Type[children.size()];
for (int i = 0; i < children.size(); i++) {
if (children.get(i) instanceof LiteralExpr) {
childTypes[i] = children.get(i).getType();
} else if (children.get(i) instanceof SlotRef) {
childTypes[i] = getChildTypeByName(children.get(i).getExprName(), stmt);
} else {
throw new AnalysisException(String.format(
"partition expr %s has unrecognized parameter in slot %d", func.getExprName(), i));
}
}
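                    // resolve a builtin function for this name and the collected argument
                    // types; failure to resolve means the partition expr is not legal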
Function fn = null;
try {
fn = func.getBuiltinFunction(func.getFnName().getFunction(), childTypes,
                                Function.CompareMode.IS_INDISTINGUISHABLE);
} catch (Exception e) {
throw new AnalysisException("partition expr " + func.getExprName() + " is illegal!");
}
if (fn == null) {
throw new AnalysisException("partition expr " + func.getExprName() + " is illegal!");
}
}
}
}

PartitionInfo partitionInfo = null;
Map<String, Long> partitionNameToId = Maps.newHashMap();
if (partitionDesc != null) {
@@ -2750,7 +2797,6 @@ private void createOlapTable(Database db, CreateTableStmt stmt) throws UserExcep
}
throw t;
}

}
}

@@ -287,10 +287,6 @@ suite("test_auto_list_partition") {
"""
sql """ insert into stream_load_list_test_table_string_key values (1,"20"), (2," ");"""
sql """ insert into stream_load_list_test_table_string_key values (3,"!"), (4,"! ");"""
test {
sql """ insert into stream_load_list_test_table_string_key values (5,"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaA")"""
exception "Partition name's length is over limit of 50."
}
result12 = sql "show partitions from stream_load_list_test_table_string_key"
logger.info("${result12}")
assertEquals(result12.size(), 4)
@@ -256,8 +256,8 @@ suite("test_auto_partition_behavior") {
}

    // PROHIBIT different time units of interval when using both auto & dynamic partition
sql "set experimental_enable_nereids_planner=true;"
test{
sql "set experimental_enable_nereids_planner=true;"
sql """
CREATE TABLE tbl3
(
@@ -280,8 +280,9 @@
"""
exception "If support auto partition and dynamic partition at same time, they must have the same interval unit."
}

sql "set experimental_enable_nereids_planner=false;"
test{
sql "set experimental_enable_nereids_planner=false;"
sql """
CREATE TABLE tbl3
(
@@ -324,4 +325,53 @@

exception "Partition name's length is over limit of 50."
}

    // illegal partition definition
sql "set experimental_enable_nereids_planner=false;"
test{
sql """
create table illegal(
k0 datetime(6) NOT null,
k1 datetime(6) NOT null
)
auto partition by range date_trunc(k0, k1, 'hour')
(
)
DISTRIBUTED BY HASH(`k0`) BUCKETS 2
properties("replication_num" = "1");
"""
exception "auto create partition only support one slotRef in function expr"
}

sql "set experimental_enable_nereids_planner=true;"
sql "set enable_fallback_to_original_planner=false;"
test{
sql """
create table illegal(
k0 datetime(6) NOT null,
k1 datetime(6) NOT null
)
auto partition by range date_trunc(k0, k1, 'hour')
(
)
DISTRIBUTED BY HASH(`k0`) BUCKETS 2
properties("replication_num" = "1");
"""
exception "partition expr date_trunc is illegal!"
}
    // test wrong argument type of the partition function
test{
sql """
create table illegal(
k0 datetime(6) NOT null,
k1 int NOT null
)
auto partition by range date_trunc(k1, 'hour')
(
)
DISTRIBUTED BY HASH(`k0`) BUCKETS 2
properties("replication_num" = "1");
"""
exception "partition expr date_trunc is illegal!"
}
}
@@ -74,7 +74,6 @@ suite("multi_thread_load", "p1,nonConcurrent") { // stress case should use resou
def sout = new StringBuilder(), serr = new StringBuilder()
proc.consumeProcessOutput(sout, serr)
proc.waitForOrKill(7200000)
// logger.info("std out: " + sout + "std err: " + serr)
}
}

@@ -152,10 +151,6 @@ suite("multi_thread_load", "p1,nonConcurrent") { // stress case should use resou
proc.waitForOrKill(600000) // 10 minutes
}

// for (int i = 0; i < data_count; i++) {
// logger.info("try to run " + i + " : " + cm_list[i])
// load_threads.add(Thread.startDaemon{concurrent_load(cm_list[i])})
// }
load_threads.add(Thread.startDaemon{concurrent_load(cm_list[0])})
load_threads.add(Thread.startDaemon{concurrent_load(cm_list[1])})
load_threads.add(Thread.startDaemon{concurrent_load(cm_list[2])})
@@ -179,12 +179,12 @@ suite("stress_test_high_concurrency_load") {
}

def row_count_range = sql """select count(*) from ${tb_name2};"""
assertTrue(cur_rows * data_count == row_count_range[0][0])
assertTrue(cur_rows * data_count == row_count_range[0][0], "${cur_rows * data_count}, ${row_count_range[0][0]}")
def partition_res_range = sql """show partitions from ${tb_name2} order by PartitionName;"""
for (int i = 0; i < partition_res_range.size(); i++) {
for (int j = i+1; j < partition_res_range.size(); j++) {
if (partition_res_range[i][6] == partition_res_range[j][6]) {
assertTrue(false)
assertTrue(false, "$i, $j")
}
}
}
