diff --git a/src/query/ast/src/ast/statements/copy.rs b/src/query/ast/src/ast/statements/copy.rs
index 88d483037ca0..3cfaac47e08f 100644
--- a/src/query/ast/src/ast/statements/copy.rs
+++ b/src/query/ast/src/ast/statements/copy.rs
@@ -342,7 +342,6 @@ pub struct UriLocation {
     pub protocol: String,
     pub name: String,
     pub path: String,
-    pub part_prefix: String,
     pub connection: Connection,
 }
 
@@ -351,23 +350,17 @@ impl UriLocation {
         protocol: String,
         name: String,
         path: String,
-        part_prefix: String,
         conns: BTreeMap<String, String>,
     ) -> Self {
         Self {
             protocol,
             name,
             path,
-            part_prefix,
             connection: Connection::new(conns),
         }
     }
 
-    pub fn from_uri(
-        uri: String,
-        part_prefix: String,
-        conns: BTreeMap<String, String>,
-    ) -> Result<Self> {
+    pub fn from_uri(uri: String, conns: BTreeMap<String, String>) -> Result<Self> {
         // fs location is not a valid url, let's check it in advance.
         if let Some(path) = uri.strip_prefix("fs://") {
             if !path.starts_with('/') {
@@ -380,7 +373,6 @@ impl UriLocation {
                 "fs".to_string(),
                 "".to_string(),
                 path.to_string(),
-                part_prefix,
                 BTreeMap::default(),
             ));
         }
@@ -411,7 +403,6 @@ impl UriLocation {
             protocol,
             name,
             path,
-            part_prefix,
             connection: Connection::new(conns),
         })
     }
@@ -428,9 +419,6 @@ impl Display for UriLocation {
     fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
         write!(f, "'{}://{}{}'", self.protocol, self.name, self.path)?;
         write!(f, "{}", self.connection)?;
-        if !self.part_prefix.is_empty() {
-            write!(f, " LOCATION_PREFIX = '{}'", self.part_prefix)?;
-        }
         Ok(())
     }
 }
diff --git a/src/query/ast/src/parser/stage.rs b/src/query/ast/src/parser/stage.rs
index f07189c55f56..8add065bd990 100644
--- a/src/query/ast/src/parser/stage.rs
+++ b/src/query/ast/src/parser/stage.rs
@@ -221,11 +221,10 @@ pub fn string_location(i: Input) -> IResult<FileLocation> {
         rule! {
             #literal_string
             ~ (CONNECTION ~ ^"=" ~ ^#connection_options ~ ","?)?
-            ~ (LOCATION_PREFIX ~ ^"=" ~ ^#literal_string ~ ","?)?
         },
-        |(location, connection_opts, location_prefix)| {
+        |(location, connection_opts)| {
             if let Some(stripped) = location.strip_prefix('@') {
-                if location_prefix.is_none() && connection_opts.is_none() {
+                if connection_opts.is_none() {
                     Ok(FileLocation::Stage(stripped.to_string()))
                 } else {
                     Err(nom::Err::Failure(ErrorKind::Other(
@@ -233,16 +232,11 @@ pub fn string_location(i: Input) -> IResult<FileLocation> {
                     )))
                 }
            } else {
-                let part_prefix = if let Some((_, _, p, _)) = location_prefix {
-                    p
-                } else {
-                    "".to_string()
-                };
                 // fs location is not a valid url, let's check it in advance.
                 let conns = connection_opts.map(|v| v.2).unwrap_or_default();
 
-                let uri = UriLocation::from_uri(location, part_prefix, conns)
+                let uri = UriLocation::from_uri(location, conns)
                     .map_err(|_| nom::Err::Failure(ErrorKind::Other("invalid uri")))?;
                 Ok(FileLocation::Uri(uri))
             }
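With `LOCATION_PREFIX` removed from the grammar, `UriLocation::from_uri` now takes only the URI and the connection options. A minimal sketch of the new call shape for downstream callers (the crate path and error plumbing here are assumptions for illustration, not taken from this patch):

```rust
use std::collections::BTreeMap;

use databend_common_ast::ast::UriLocation;

// Hypothetical call site. Before this patch the call was
// `UriLocation::from_uri(uri, part_prefix, conns)`; the middle
// argument is gone and everything else is unchanged.
fn parse_s3_location() -> Result<(), Box<dyn std::error::Error>> {
    let mut conns = BTreeMap::new();
    conns.insert(
        "endpoint_url".to_string(),
        "http://127.0.0.1:9900".to_string(),
    );

    let uri = UriLocation::from_uri("s3://mybucket/data.csv".to_string(), conns)?;
    assert_eq!(uri.protocol, "s3");
    assert_eq!(uri.name, "mybucket");
    assert_eq!(uri.path, "/data.csv");
    Ok(())
}
```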
diff --git a/src/query/ast/tests/it/parser.rs b/src/query/ast/tests/it/parser.rs
index 48db7dc4f6ab..e249ca198fa5 100644
--- a/src/query/ast/tests/it/parser.rs
+++ b/src/query/ast/tests/it/parser.rs
@@ -129,11 +129,6 @@ fn test_statement() {
         r#"create table a.b like c.d;"#,
         r#"create table t like t2 engine = memory;"#,
         r#"create table if not exists a.b (a int) 's3://testbucket/admin/data/' connection=(aws_key_id='minioadmin' aws_secret_key='minioadmin' endpoint_url='http://127.0.0.1:9900');"#,
-        r#"
-        create table if not exists a.b (a int) 's3://testbucket/admin/data/'
-        connection=(aws_key_id='minioadmin' aws_secret_key='minioadmin' endpoint_url='http://127.0.0.1:9900')
-        location_prefix = 'db';
-        "#,
         r#"truncate table a;"#,
         r#"truncate table "a".b;"#,
         r#"drop table a;"#,
diff --git a/src/query/ast/tests/it/testdata/stmt-error.txt b/src/query/ast/tests/it/testdata/stmt-error.txt
index 49cf494c9d72..5ee14c7618a8 100644
--- a/src/query/ast/tests/it/testdata/stmt-error.txt
+++ b/src/query/ast/tests/it/testdata/stmt-error.txt
@@ -321,7 +321,7 @@ error:
   --> SQL:1:38
   |
 1 | COPY INTO mytable FROM 's3://bucket' CONECTION= ();
-  |                                      ^^^^^^^^^ unexpected `CONECTION`, expecting `CONNECTION`, `ON_ERROR`, `RETURN_FAILED_ONLY`, `LOCATION_PREFIX`, `FORMAT`, `VALIDATION_MODE`, `FORCE`, `PATTERN`, `FILES`, `PURGE`, `SIZE_LIMIT`, `FILE_FORMAT`, `MAX_FILES`, `DISABLE_VARIANT_CHECK`, `SPLIT_SIZE`, or `;`
+  |                                      ^^^^^^^^^ unexpected `CONECTION`, expecting `CONNECTION`, `ON_ERROR`, `RETURN_FAILED_ONLY`, `FORMAT`, `VALIDATION_MODE`, `FORCE`, `PATTERN`, `FILES`, `PURGE`, `SIZE_LIMIT`, `FILE_FORMAT`, `MAX_FILES`, `DISABLE_VARIANT_CHECK`, `SPLIT_SIZE`, or `;`
 
 
 ---------- Input ----------
diff --git a/src/query/ast/tests/it/testdata/stmt.txt b/src/query/ast/tests/it/testdata/stmt.txt
index 20337dfb9456..27869c0c54d2 100644
--- a/src/query/ast/tests/it/testdata/stmt.txt
+++ b/src/query/ast/tests/it/testdata/stmt.txt
@@ -2759,81 +2759,6 @@ CreateTable(
                 protocol: "s3",
                 name: "testbucket",
                 path: "/admin/data/",
-                part_prefix: "",
-                connection: Connection {
-                    visited_keys: {},
-                    conns: {
-                        "aws_key_id": "minioadmin",
-                        "aws_secret_key": "minioadmin",
-                        "endpoint_url": "http://127.0.0.1:9900",
-                    },
-                },
-            },
-        ),
-        cluster_by: None,
-        table_options: {},
-        as_query: None,
-        table_type: Normal,
-    },
-)
-
-
----------- Input ----------
-create table if not exists a.b (a int) 's3://testbucket/admin/data/'
-connection=(aws_key_id='minioadmin' aws_secret_key='minioadmin' endpoint_url='http://127.0.0.1:9900')
-location_prefix = 'db';
----------- Output ---------
-CREATE TABLE IF NOT EXISTS a.b (a Int32) 's3://testbucket/admin/data/' CONNECTION = ( aws_key_id = 'minioadmin', aws_secret_key = 'minioadmin', endpoint_url = 'http://127.0.0.1:9900' ) LOCATION_PREFIX = 'db'
----------- AST ------------
-CreateTable(
-    CreateTableStmt {
-        create_option: CreateIfNotExists,
-        catalog: None,
-        database: Some(
-            Identifier {
-                span: Some(
-                    27..28,
-                ),
-                name: "a",
-                quote: None,
-                ident_type: None,
-            },
-        ),
-        table: Identifier {
-            span: Some(
-                29..30,
-            ),
-            name: "b",
-            quote: None,
-            ident_type: None,
-        },
-        source: Some(
-            Columns(
-                [
-                    ColumnDefinition {
-                        name: Identifier {
-                            span: Some(
-                                32..33,
-                            ),
-                            name: "a",
-                            quote: None,
-                            ident_type: None,
-                        },
-                        data_type: Int32,
-                        expr: None,
-                        comment: None,
-                    },
-                ],
-                None,
-            ),
-        ),
-        engine: None,
-        uri_location: Some(
-            UriLocation {
-                protocol: "s3",
-                name: "testbucket",
-                path: "/admin/data/",
-                part_prefix: "db",
                 connection: Connection {
                     visited_keys: {},
                     conns: {
@@ -11299,7 +11224,6 @@ CreateStage(
                 protocol: "s3",
                 name: "load",
                 path: "/files/",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {
@@ -11344,7 +11268,6 @@ CreateStage(
                 protocol: "s3",
                 name: "load",
                 path: "/files/",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {
@@ -11389,7 +11312,6 @@ CreateStage(
                 protocol: "azblob",
                 name: "load",
                 path: "/files/",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {
@@ -11434,7 +11356,6 @@ CreateStage(
                 protocol: "azblob",
                 name: "load",
                 path: "/files/",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {
@@ -14050,7 +13971,6 @@ CopyIntoTable(
                 protocol: "s3",
                 name: "mybucket",
                 path: "/data.csv",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {},
@@ -14127,7 +14047,6 @@ CopyIntoTable(
                 protocol: "s3",
                 name: "mybucket",
                 path: "/data.csv",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {},
@@ -14206,7 +14125,6 @@ CopyIntoTable(
                 protocol: "s3",
                 name: "mybucket",
                 path: "/data.csv",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {
@@ -14287,7 +14205,6 @@ CopyIntoTable(
                 protocol: "s3",
                 name: "mybucket",
                 path: "/data.csv",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {
@@ -14358,7 +14275,6 @@ CopyIntoTable(
                 protocol: "https",
                 name: "127.0.0.1:9900",
                 path: "/",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {},
@@ -14414,7 +14330,6 @@ CopyIntoTable(
                 protocol: "https",
                 name: "127.0.0.1",
                 path: "/",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {},
@@ -14562,7 +14477,6 @@ CopyIntoLocation(
                 protocol: "s3",
                 name: "mybucket",
                 path: "/data.csv",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {},
@@ -14728,7 +14642,6 @@ CopyIntoTable(
                 protocol: "s3",
                 name: "mybucket",
                 path: "/data.csv",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {
@@ -15009,7 +14922,6 @@ CopyIntoTable(
                 protocol: "fs",
                 name: "",
                 path: "/path/to/data.csv",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {},
@@ -15084,7 +14996,6 @@ CopyIntoTable(
                 protocol: "s3",
                 name: "databend",
                 path: "/books.csv",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {
@@ -17249,7 +17160,6 @@ Query(
                 protocol: "s3",
                 name: "test",
                 path: "/bucket",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {},
@@ -22350,7 +22260,6 @@ AttachTable(
                 protocol: "s3",
                 name: "a",
                 path: "/",
-                part_prefix: "",
                 connection: Connection {
                     visited_keys: {},
                     conns: {
diff --git a/src/query/ee/src/attach_table/handler.rs b/src/query/ee/src/attach_table/handler.rs
index df3a61d6e687..dd566043f61e 100644
--- a/src/query/ee/src/attach_table/handler.rs
+++ b/src/query/ee/src/attach_table/handler.rs
@@ -75,7 +75,6 @@ impl AttachTableHandler for RealAttachTableHandler {
             schema: Arc::new(snapshot.schema.clone()),
             engine: plan.engine.to_string(),
             storage_params: plan.storage_params.clone(),
-            part_prefix: plan.part_prefix.clone(),
             options,
             default_cluster_key: None,
             field_comments,
diff --git a/src/query/ee/tests/it/inverted_index/pruning.rs b/src/query/ee/tests/it/inverted_index/pruning.rs
index 3bca0767a98d..6a21fa3da055 100644
--- a/src/query/ee/tests/it/inverted_index/pruning.rs
+++ b/src/query/ee/tests/it/inverted_index/pruning.rs
@@ -112,7 +112,6 @@ async fn test_block_pruner() -> Result<()> {
         engine: Engine::Fuse,
         engine_options: Default::default(),
         storage_params: None,
-        part_prefix: "".to_string(),
         options: [
             (FUSE_OPT_KEY_ROW_PER_BLOCK.to_owned(), num_blocks_opt),
             (FUSE_OPT_KEY_BLOCK_PER_SEGMENT.to_owned(), "5".to_owned()),
diff --git a/src/query/service/src/interpreters/interpreter_table_create.rs b/src/query/service/src/interpreters/interpreter_table_create.rs
index 60679aa4a7b8..f15937f892cc 100644
--- a/src/query/service/src/interpreters/interpreter_table_create.rs
+++ b/src/query/service/src/interpreters/interpreter_table_create.rs
@@ -379,7 +379,6 @@ impl CreateTableInterpreter {
             schema: schema.clone(),
             engine: self.plan.engine.to_string(),
             storage_params: self.plan.storage_params.clone(),
-            part_prefix: self.plan.part_prefix.clone(),
             options,
             engine_options: self.plan.engine_options.clone(),
             default_cluster_key: None,
diff --git a/src/query/service/src/pipelines/processors/transforms/transform_recursive_cte_source.rs b/src/query/service/src/pipelines/processors/transforms/transform_recursive_cte_source.rs
index a0e9c43d3b29..e86b7b10c3b6 100644
--- a/src/query/service/src/pipelines/processors/transforms/transform_recursive_cte_source.rs
+++ b/src/query/service/src/pipelines/processors/transforms/transform_recursive_cte_source.rs
@@ -301,7 +301,6 @@ async fn create_memory_table_for_cte_scan(
         engine: Engine::Memory,
         engine_options: Default::default(),
         storage_params: None,
-        part_prefix: "".to_string(),
         options: Default::default(),
         field_comments: vec![],
         cluster_key: None,
diff --git a/src/query/service/src/table_functions/infer_schema/parquet.rs b/src/query/service/src/table_functions/infer_schema/parquet.rs
index a14e068592de..de814c75c395 100644
--- a/src/query/service/src/table_functions/infer_schema/parquet.rs
+++ b/src/query/service/src/table_functions/infer_schema/parquet.rs
@@ -78,11 +78,8 @@ impl AsyncSource for ParquetInferSchemaSource {
             FileLocation::Stage(location.to_string())
         } else if let Some(connection_name) = &self.args_parsed.connection_name {
             let conn = self.ctx.get_connection(connection_name).await?;
-            let uri = UriLocation::from_uri(
-                self.args_parsed.location.clone(),
-                "".to_string(),
-                conn.storage_params,
-            )?;
+            let uri =
+                UriLocation::from_uri(self.args_parsed.location.clone(), conn.storage_params)?;
             let proto = conn.storage_type.parse::<Scheme>()?;
             if proto != uri.protocol.parse::<Scheme>()? {
                 return Err(ErrorCode::BadArguments(format!(
@@ -92,11 +89,8 @@ impl AsyncSource for ParquetInferSchemaSource {
             }
             FileLocation::Uri(uri)
         } else {
-            let uri = UriLocation::from_uri(
-                self.args_parsed.location.clone(),
-                "".to_string(),
-                BTreeMap::default(),
-            )?;
+            let uri =
+                UriLocation::from_uri(self.args_parsed.location.clone(), BTreeMap::default())?;
             FileLocation::Uri(uri)
         };
         let (stage_info, path) = resolve_file_location(self.ctx.as_ref(), &file_location).await?;
diff --git a/src/query/service/src/test_kits/fixture.rs b/src/query/service/src/test_kits/fixture.rs
index 09fb1534168a..2f5f7959e2b5 100644
--- a/src/query/service/src/test_kits/fixture.rs
+++ b/src/query/service/src/test_kits/fixture.rs
@@ -337,7 +337,6 @@ impl TestFixture {
             engine: Engine::Fuse,
             engine_options: Default::default(),
             storage_params: None,
-            part_prefix: "".to_string(),
             options: [
                 // database id is required for FUSE
                 (OPT_KEY_DATABASE_ID.to_owned(), "1".to_owned()),
@@ -362,7 +361,6 @@ impl TestFixture {
             engine: Engine::Fuse,
             engine_options: Default::default(),
             storage_params: None,
-            part_prefix: "".to_string(),
             options: [
                 // database id is required for FUSE
                 (OPT_KEY_DATABASE_ID.to_owned(), "1".to_owned()),
@@ -398,7 +396,6 @@ impl TestFixture {
             engine: Engine::Fuse,
             engine_options: Default::default(),
             storage_params: None,
-            part_prefix: "".to_string(),
             options: [
                 // database id is required for FUSE
                 (OPT_KEY_DATABASE_ID.to_owned(), "1".to_owned()),
@@ -434,7 +431,6 @@ impl TestFixture {
             engine: Engine::Fuse,
             engine_options: Default::default(),
             storage_params: None,
-            part_prefix: "".to_string(),
             options: [
                 // database id is required for FUSE
                 (OPT_KEY_DATABASE_ID.to_owned(), "1".to_owned()),
@@ -479,7 +475,6 @@ impl TestFixture {
             engine: Engine::Fuse,
             engine_options: Default::default(),
             storage_params: None,
-            part_prefix: "".to_string(),
             options: [
                 // database id is required for FUSE
                 (OPT_KEY_DATABASE_ID.to_owned(), "1".to_owned()),
diff --git a/src/query/service/tests/it/sql/planner/optimizer/agg_index_query_rewrite.rs b/src/query/service/tests/it/sql/planner/optimizer/agg_index_query_rewrite.rs
index 989911ae584b..34b7747e7c8a 100644
--- a/src/query/service/tests/it/sql/planner/optimizer/agg_index_query_rewrite.rs
+++ b/src/query/service/tests/it/sql/planner/optimizer/agg_index_query_rewrite.rs
@@ -74,7 +74,6 @@ fn create_table_plan(fixture: &TestFixture, format: &str) -> CreateTablePlan {
         engine: Engine::Fuse,
         engine_options: Default::default(),
         storage_params: None,
-        part_prefix: "".to_string(),
         options: [
             // database id is required for FUSE
             (OPT_KEY_DATABASE_ID.to_owned(), "1".to_owned()),
diff --git a/src/query/service/tests/it/storages/fuse/operations/clustering.rs b/src/query/service/tests/it/storages/fuse/operations/clustering.rs
index 4f319fcd60d7..39183baaf4cb 100644
--- a/src/query/service/tests/it/storages/fuse/operations/clustering.rs
+++ b/src/query/service/tests/it/storages/fuse/operations/clustering.rs
@@ -48,7 +48,6 @@ async fn test_fuse_alter_table_cluster_key() -> databend_common_exception::Result<()> {
         engine: Engine::Fuse,
         engine_options: Default::default(),
         storage_params: None,
-        part_prefix: "".to_string(),
         options: [
             // database id is required for FUSE
             (OPT_KEY_DATABASE_ID.to_owned(), "1".to_owned()),
diff --git a/src/query/service/tests/it/storages/fuse/pruning.rs b/src/query/service/tests/it/storages/fuse/pruning.rs
index d9a204ff4be8..58a3d58110ab 100644
--- a/src/query/service/tests/it/storages/fuse/pruning.rs
+++ b/src/query/service/tests/it/storages/fuse/pruning.rs
@@ -106,7 +106,6 @@ async fn test_block_pruner() -> Result<()> {
         engine: Engine::Fuse,
         engine_options: Default::default(),
         storage_params: None,
-        part_prefix: "".to_string(),
         options: [
             (FUSE_OPT_KEY_ROW_PER_BLOCK.to_owned(), num_blocks_opt),
             (FUSE_OPT_KEY_BLOCK_PER_SEGMENT.to_owned(), "1".to_owned()),
diff --git a/src/query/sql/src/planner/binder/ddl/catalog.rs b/src/query/sql/src/planner/binder/ddl/catalog.rs
index 340cc7345eb2..9d2b9290e453 100644
--- a/src/query/sql/src/planner/binder/ddl/catalog.rs
+++ b/src/query/sql/src/planner/binder/ddl/catalog.rs
@@ -192,7 +192,7 @@ async fn parse_hive_catalog_url(
         return Ok(None);
     };
 
-    let mut location = UriLocation::from_uri(uri, "".to_string(), options)?;
+    let mut location = UriLocation::from_uri(uri, options)?;
     let sp = parse_storage_params_from_uri(
         &mut location,
         Some(ctx.as_ref()),
diff --git a/src/query/sql/src/planner/binder/ddl/connection.rs b/src/query/sql/src/planner/binder/ddl/connection.rs
index 1b27823ebdc5..7f802f56b734 100644
--- a/src/query/sql/src/planner/binder/ddl/connection.rs
+++ b/src/query/sql/src/planner/binder/ddl/connection.rs
@@ -31,7 +31,6 @@ impl Binder {
             stmt.storage_type.clone(),
             "".to_string(),
             "/".to_string(),
-            "".to_string(),
             stmt.storage_params.clone(),
         );
         parse_storage_params_from_uri(&mut location, None, "when CREATE CONNECTION").await?;
diff --git a/src/query/sql/src/planner/binder/ddl/stage.rs b/src/query/sql/src/planner/binder/ddl/stage.rs
index 1a331973d871..2036ba4f8c2e 100644
--- a/src/query/sql/src/planner/binder/ddl/stage.rs
+++ b/src/query/sql/src/planner/binder/ddl/stage.rs
@@ -78,7 +78,6 @@ impl Binder {
             protocol: uri.protocol.clone(),
             name: uri.name.clone(),
             path: uri.path.clone(),
-            part_prefix: uri.part_prefix.clone(),
             connection: uri.connection.clone(),
         };
 
diff --git a/src/query/sql/src/planner/binder/ddl/table.rs b/src/query/sql/src/planner/binder/ddl/table.rs
index 394c2323ff1e..0799b19b7186 100644
--- a/src/query/sql/src/planner/binder/ddl/table.rs
+++ b/src/query/sql/src/planner/binder/ddl/table.rs
@@ -448,13 +448,12 @@ impl Binder {
             )?;
         }
 
-        let (mut storage_params, part_prefix) = match (uri_location, engine) {
+        let mut storage_params = match (uri_location, engine) {
             (Some(uri), Engine::Fuse) => {
                 let mut uri = UriLocation {
                     protocol: uri.protocol.clone(),
                     name: uri.name.clone(),
                     path: uri.path.clone(),
-                    part_prefix: uri.part_prefix.clone(),
                     connection: uri.connection.clone(),
                 };
                 let sp = parse_storage_params_from_uri(
@@ -467,24 +466,17 @@ impl Binder {
                 // create a temporary op to check if params is correct
                 let data_operator = DataOperator::try_create(&sp).await?;
 
-                // Path ends with "/" means it's a directory.
-                let fp = if uri.path.ends_with('/') {
-                    uri.part_prefix.clone()
-                } else {
-                    "".to_string()
-                };
-
                 // Verify essential privileges.
                 // The permission check might fail for reasons other than the permissions themselves,
                 // such as network communication issues.
                 verify_external_location_privileges(data_operator.operator()).await?;
-                (Some(sp), fp)
+                Some(sp)
             }
             (Some(uri), _) => Err(ErrorCode::BadArguments(format!(
                 "Incorrect CREATE query: CREATE TABLE with external location is only supported for FUSE engine, but got {:?} for {:?}",
                 engine, uri
             )))?,
-            _ => (None, "".to_string()),
+            _ => None,
         };
 
         match table_type {
@@ -694,7 +686,6 @@ impl Binder {
             engine,
             engine_options,
             storage_params,
-            part_prefix,
             options,
             field_comments,
             cluster_key,
@@ -756,13 +747,6 @@ impl Binder {
         // create a temporary op to check if params is correct
         DataOperator::try_create(&sp).await?;
 
-        // Path ends with "/" means it's a directory.
-        let part_prefix = if uri.path.ends_with('/') {
-            uri.part_prefix.clone()
-        } else {
-            "".to_string()
-        };
-
         Ok(Plan::CreateTable(Box::new(CreateTablePlan {
             create_option: CreateOption::Create,
             tenant: self.ctx.get_tenant(),
@@ -773,7 +757,6 @@ impl Binder {
             engine: Engine::Fuse,
             engine_options: BTreeMap::new(),
             storage_params: Some(sp),
-            part_prefix,
             options,
             field_comments: vec![],
             cluster_key: None,
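With the prefix gone, the binder threads a plain `Option<StorageParams>` through the plan instead of a `(Option<StorageParams>, String)` pair. A reduced, self-contained sketch of the resulting control flow (stand-in types and names; the real parsing and privilege checks are elided):

```rust
// Stand-in for the real StorageParams type; only the match shape matters.
#[derive(Debug)]
struct StorageParams(String);

fn resolve_storage_params(
    uri_location: Option<&str>,
    is_fuse_engine: bool,
) -> Result<Option<StorageParams>, String> {
    match (uri_location, is_fuse_engine) {
        // External location on the FUSE engine: parse, validate, keep.
        (Some(uri), true) => Ok(Some(StorageParams(uri.to_string()))),
        // External location on any other engine is rejected outright.
        (Some(uri), false) => Err(format!(
            "CREATE TABLE with external location is only supported for FUSE engine, got {uri}"
        )),
        // No external location: nothing to thread through the plan.
        (None, _) => Ok(None),
    }
}
```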
diff --git a/src/query/sql/src/planner/binder/location.rs b/src/query/sql/src/planner/binder/location.rs
index d26ea6fde018..0a4973fce1c4 100644
--- a/src/query/sql/src/planner/binder/location.rs
+++ b/src/query/sql/src/planner/binder/location.rs
@@ -594,11 +594,7 @@ pub async fn get_storage_params_from_options(
     let mut location = if let Some(connection) = connection {
         let connection = ctx.get_connection(connection).await?;
-        let location = UriLocation::from_uri(
-            location.to_string(),
-            "".to_string(),
-            connection.storage_params,
-        )?;
+        let location = UriLocation::from_uri(location.to_string(), connection.storage_params)?;
         if location.protocol.to_lowercase() != connection.storage_type {
             return Err(ErrorCode::BadArguments(format!(
                 "Incorrect CREATE query: protocol in location {:?} is not equal to connection {:?}",
@@ -607,7 +603,7 @@ pub async fn get_storage_params_from_options(
         };
         location
     } else {
-        UriLocation::from_uri(location.to_string(), "".to_string(), BTreeMap::new())?
+        UriLocation::from_uri(location.to_string(), BTreeMap::new())?
     };
     let sp = parse_storage_params_from_uri(
         &mut location,
diff --git a/src/query/sql/src/planner/plans/ddl/table.rs b/src/query/sql/src/planner/plans/ddl/table.rs
index c7ac6b645208..6a1739741a59 100644
--- a/src/query/sql/src/planner/plans/ddl/table.rs
+++ b/src/query/sql/src/planner/plans/ddl/table.rs
@@ -50,7 +50,6 @@ pub struct CreateTablePlan {
     pub engine: Engine,
     pub engine_options: TableOptions,
     pub storage_params: Option<StorageParams>,
-    pub part_prefix: String,
     pub options: TableOptions,
     pub field_comments: Vec<String>,
     pub cluster_key: Option<String>,
diff --git a/src/query/sql/src/planner/semantic/type_check.rs b/src/query/sql/src/planner/semantic/type_check.rs
index 43538c30792f..479ed886c87d 100644
--- a/src/query/sql/src/planner/semantic/type_check.rs
+++ b/src/query/sql/src/planner/semantic/type_check.rs
@@ -3705,11 +3705,7 @@ impl<'a> TypeChecker<'a> {
         let file_location = match udf_definition.code.strip_prefix('@') {
             Some(location) => FileLocation::Stage(location.to_string()),
             None => {
-                let uri = UriLocation::from_uri(
-                    udf_definition.code.clone(),
-                    "".to_string(),
-                    BTreeMap::default(),
-                );
+                let uri = UriLocation::from_uri(udf_definition.code.clone(), BTreeMap::default());
 
                 match uri {
                     Ok(uri) => FileLocation::Uri(uri),
diff --git a/src/query/sql/tests/location.rs b/src/query/sql/tests/location.rs
index f4f2ce4dda10..106cca1ed476 100644
--- a/src/query/sql/tests/location.rs
+++ b/src/query/sql/tests/location.rs
@@ -55,7 +55,6 @@ async fn test_parse_uri_location() -> Result<()> {
             "ipfs".to_string(),
             "".to_string(),
             "too-naive".to_string(),
-            "".to_string(),
             vec![("endpoint_url", "ipfs.filebase.io")]
                 .into_iter()
                 .map(|(k, v)| (k.to_string(), v.to_string()))
@@ -75,7 +74,6 @@ async fn test_parse_uri_location() -> Result<()> {
             "oss".to_string(),
             "zhen".to_string(),
             "/highest/".to_string(),
-            "".to_string(),
             vec![
                 ("endpoint_url", "https://oss-cn-litang.example.com"),
                 ("access_key_id", "dzin"),
@@ -126,7 +124,6 @@ async fn test_parse_uri_location() -> Result<()> {
             "ipfs".to_string(),
             "".to_string(),
             "too-simple".to_string(),
-            "".to_string(),
             BTreeMap::new(),
         ),
         (
@@ -143,7 +140,6 @@ async fn test_parse_uri_location() -> Result<()> {
             "ipfs".to_string(),
             "".to_string(),
             "too-naive".to_string(),
-            "".to_string(),
             vec![("endpoint_url", "https://ipfs.filebase.io")]
                 .into_iter()
                 .map(|(k, v)| (k.to_string(), v.to_string()))
@@ -163,7 +159,6 @@ async fn test_parse_uri_location() -> Result<()> {
             "s3".to_string(),
             "test".to_string(),
             "/tmp/".to_string(),
-            "".to_string(),
             [
                 ("access_key_id", "access_key_id"),
                 ("secret_access_key", "secret_access_key"),
@@ -198,7 +193,6 @@ async fn test_parse_uri_location() -> Result<()> {
             "s3".to_string(),
             "test".to_string(),
             "/tmp/".to_string(),
-            "".to_string(),
             [
                 ("aws_key_id", "access_key_id"),
                 ("aws_secret_key", "secret_access_key"),
@@ -233,7 +227,6 @@ async fn test_parse_uri_location() -> Result<()> {
             "s3".to_string(),
             "test".to_string(),
             "/tmp/".to_string(),
-            "".to_string(),
             [
                 ("aws_key_id", "access_key_id"),
                 ("aws_secret_key", "secret_access_key"),
@@ -268,7 +261,6 @@ async fn test_parse_uri_location() -> Result<()> {
             "s3".to_string(),
             "test".to_string(),
             "/tmp/".to_string(),
-            "".to_string(),
             [("role_arn", "aws::iam::xxxx"), ("region", "us-east-2")]
                 .iter()
                 .map(|(k, v)| (k.to_string(), v.to_string()))
@@ -298,7 +290,6 @@ async fn test_parse_uri_location() -> Result<()> {
             "fs".to_string(),
             "".to_string(),
             "/tmp/".to_string(),
-            "".to_string(),
             BTreeMap::default(),
         ),
         (
@@ -314,7 +305,6 @@ async fn test_parse_uri_location() -> Result<()> {
             "gcs".to_string(),
"example".to_string(), "/tmp/".to_string(), - "".to_string(), vec![("credential", "gcs.credential")] .into_iter() .map(|(k, v)| (k.to_string(), v.to_string())) @@ -336,7 +326,6 @@ async fn test_parse_uri_location() -> Result<()> { "https".to_string(), "example.com".to_string(), "/tmp.csv".to_string(), - "".to_string(), BTreeMap::default(), ), ( @@ -353,7 +342,6 @@ async fn test_parse_uri_location() -> Result<()> { "https".to_string(), "example.com".to_string(), "/tmp-{a,b,c}.csv".to_string(), - "".to_string(), BTreeMap::default(), ), ( @@ -373,7 +361,6 @@ async fn test_parse_uri_location() -> Result<()> { "https".to_string(), "example.com".to_string(), "/tmp-[11-15].csv".to_string(), - "".to_string(), BTreeMap::default(), ), ( @@ -399,7 +386,6 @@ async fn test_parse_uri_location() -> Result<()> { "webhdfs".to_string(), "example.com".to_string(), "/path/to/dir/".to_string(), - "".to_string(), vec![("https", "TrUE"), ("delegation", "databendthebest")] .into_iter() .map(|(k, v)| (k.to_string(), v.to_string())) diff --git a/src/query/storages/fuse/src/fuse_table.rs b/src/query/storages/fuse/src/fuse_table.rs index de469a48fc53..521d0a769cbc 100644 --- a/src/query/storages/fuse/src/fuse_table.rs +++ b/src/query/storages/fuse/src/fuse_table.rs @@ -219,10 +219,13 @@ impl FuseTable { .and_then(|s| s.parse::().ok()) .unwrap_or(BloomIndexColumns::All); - let part_prefix = table_info.meta.part_prefix.clone(); + if !table_info.meta.part_prefix.is_empty() { + return Err(ErrorCode::StorageOther( + "Location_prefix no longer supported. The last version that supports it is: https://github.com/databendlabs/databend/releases/tag/v1.2.653-nightly", + )); + } - let meta_location_generator = - TableMetaLocationGenerator::with_prefix(storage_prefix).with_part_prefix(part_prefix); + let meta_location_generator = TableMetaLocationGenerator::with_prefix(storage_prefix); Ok(Box::new(FuseTable { table_info, diff --git a/src/query/storages/fuse/src/io/locations.rs b/src/query/storages/fuse/src/io/locations.rs index f3f782d3110d..3de700ba04fe 100644 --- a/src/query/storages/fuse/src/io/locations.rs +++ b/src/query/storages/fuse/src/io/locations.rs @@ -55,9 +55,6 @@ static SNAPSHOT_STATISTICS_V3: TableSnapshotStatisticsVersion = pub struct TableMetaLocationGenerator { prefix: String, - // TODO @dantengsky remove this? 
diff --git a/src/query/storages/fuse/src/io/locations.rs b/src/query/storages/fuse/src/io/locations.rs
index f3f782d3110d..3de700ba04fe 100644
--- a/src/query/storages/fuse/src/io/locations.rs
+++ b/src/query/storages/fuse/src/io/locations.rs
@@ -55,9 +55,6 @@ static SNAPSHOT_STATISTICS_V3: TableSnapshotStatisticsVersion =
 pub struct TableMetaLocationGenerator {
     prefix: String,
 
-    // TODO @dantengsky remove this? seems no longer supported https://docs.databend.com/sql/sql-commands/ddl/table/ddl-create-table-external-location
-    part_prefix: String,
-
     block_location_prefix: String,
     segment_info_location_prefix: String,
     bloom_index_location_prefix: String,
@@ -65,44 +62,23 @@ pub struct TableMetaLocationGenerator {
 impl TableMetaLocationGenerator {
     pub fn with_prefix(prefix: String) -> Self {
+        let block_location_prefix = format!("{}/{}/", &prefix, FUSE_TBL_BLOCK_PREFIX,);
+        let bloom_index_location_prefix =
+            format!("{}/{}/", &prefix, FUSE_TBL_XOR_BLOOM_INDEX_PREFIX);
+        let segment_info_location_prefix = format!("{}/{}/", &prefix, FUSE_TBL_SEGMENT_PREFIX);
+
         Self {
             prefix,
-            part_prefix: "".to_string(),
-            block_location_prefix: "".to_string(),
-            segment_info_location_prefix: "".to_string(),
-            bloom_index_location_prefix: "".to_string(),
+            block_location_prefix,
+            segment_info_location_prefix,
+            bloom_index_location_prefix,
         }
-        .gen_prefixes()
-    }
-
-    pub fn with_part_prefix(mut self, part_prefix: String) -> Self {
-        self.part_prefix = part_prefix;
-        self.gen_prefixes()
-    }
-
-    fn gen_prefixes(mut self) -> Self {
-        let block_location_prefix = format!(
-            "{}/{}/{}",
-            &self.prefix, FUSE_TBL_BLOCK_PREFIX, &self.part_prefix,
-        );
-        let bloom_index_location_prefix =
-            format!("{}/{}/", &self.prefix, FUSE_TBL_XOR_BLOOM_INDEX_PREFIX);
-        let segment_info_location_prefix = format!("{}/{}/", &self.prefix, FUSE_TBL_SEGMENT_PREFIX);
-
-        self.block_location_prefix = block_location_prefix;
-        self.bloom_index_location_prefix = bloom_index_location_prefix;
-        self.segment_info_location_prefix = segment_info_location_prefix;
-        self
     }
 
     pub fn prefix(&self) -> &str {
         &self.prefix
     }
 
-    pub fn part_prefix(&self) -> &str {
-        &self.part_prefix
-    }
-
     pub fn gen_block_location(&self) -> (Location, Uuid) {
         let part_uuid = Uuid::new_v4();
         let location_path = format!(
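After this refactor, `with_prefix` is the single constructor and computes every derived prefix eagerly, replacing both the `gen_prefixes()` fixup pass and the builder-style `with_part_prefix`. A condensed standalone sketch; the `_b`, `_sg`, and `_i_b_v2` directory names are assumptions standing in for the `FUSE_TBL_*` constants referenced above:

```rust
// Condensed sketch of the new constructor: all derived prefixes are
// computed once at construction time; no second fixup pass exists.
struct TableMetaLocationGenerator {
    prefix: String,
    block_location_prefix: String,
    segment_info_location_prefix: String,
    bloom_index_location_prefix: String,
}

impl TableMetaLocationGenerator {
    fn with_prefix(prefix: String) -> Self {
        // Literal directory names stand in for FUSE_TBL_BLOCK_PREFIX,
        // FUSE_TBL_SEGMENT_PREFIX, and FUSE_TBL_XOR_BLOOM_INDEX_PREFIX.
        let block_location_prefix = format!("{}/_b/", &prefix);
        let segment_info_location_prefix = format!("{}/_sg/", &prefix);
        let bloom_index_location_prefix = format!("{}/_i_b_v2/", &prefix);
        Self {
            prefix,
            block_location_prefix,
            segment_info_location_prefix,
            bloom_index_location_prefix,
        }
    }
}

fn main() {
    let loc_gen = TableMetaLocationGenerator::with_prefix("1/1209".to_string());
    assert_eq!(loc_gen.prefix, "1/1209");
    assert_eq!(loc_gen.block_location_prefix, "1/1209/_b/");
    assert_eq!(loc_gen.segment_info_location_prefix, "1/1209/_sg/");
    assert_eq!(loc_gen.bloom_index_location_prefix, "1/1209/_i_b_v2/");
}
```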
diff --git a/tests/sqllogictests/suites/mode/standalone/explain/explain.test b/tests/sqllogictests/suites/mode/standalone/explain/explain.test
index 6dc1cbed25ef..bff989763ab1 100644
--- a/tests/sqllogictests/suites/mode/standalone/explain/explain.test
+++ b/tests/sqllogictests/suites/mode/standalone/explain/explain.test
@@ -236,7 +236,7 @@ query T
 explain syntax copy into 's3://mybucket/data.csv' from t1 file_format = ( type = CSV field_delimiter = ',' record_delimiter = '\n' skip_header = 1)
 ----
 COPY
-INTO Uri(UriLocation { protocol: "s3", name: "mybucket", path: "/data.csv", part_prefix: "", connection: Connection { visited_keys: {}, conns: {} } })
+INTO Uri(UriLocation { protocol: "s3", name: "mybucket", path: "/data.csv", connection: Connection { visited_keys: {}, conns: {} } })
 FROM t1
 FILE_FORMAT = (
     field_delimiter = ',',
diff --git a/tests/sqllogictests/suites/mode/standalone/explain_native/explain.test b/tests/sqllogictests/suites/mode/standalone/explain_native/explain.test
index 8f93ac474229..81aaf022c559 100644
--- a/tests/sqllogictests/suites/mode/standalone/explain_native/explain.test
+++ b/tests/sqllogictests/suites/mode/standalone/explain_native/explain.test
@@ -212,7 +212,7 @@ query T
 explain syntax copy into 's3://mybucket/data.csv' from t1 file_format = ( type = CSV field_delimiter = ',' record_delimiter = '\n' skip_header = 1)
 ----
 COPY
-INTO Uri(UriLocation { protocol: "s3", name: "mybucket", path: "/data.csv", part_prefix: "", connection: Connection { visited_keys: {}, conns: {} } })
+INTO Uri(UriLocation { protocol: "s3", name: "mybucket", path: "/data.csv", connection: Connection { visited_keys: {}, conns: {} } })
 FROM t1
 FILE_FORMAT = (
     field_delimiter = ',',
diff --git a/tests/suites/1_stateful/02_query/02_0001_create_table_with_external_location.result b/tests/suites/1_stateful/02_query/02_0001_create_table_with_external_location.result
index b1f1b392bbf4..8965b7099a46 100644
--- a/tests/suites/1_stateful/02_query/02_0001_create_table_with_external_location.result
+++ b/tests/suites/1_stateful/02_query/02_0001_create_table_with_external_location.result
@@ -1,3 +1,2 @@
 888
 1024
-lulu
diff --git a/tests/suites/1_stateful/02_query/02_0001_create_table_with_external_location.sh b/tests/suites/1_stateful/02_query/02_0001_create_table_with_external_location.sh
index e2bfb6860b86..1d52ed0dbb0b 100755
--- a/tests/suites/1_stateful/02_query/02_0001_create_table_with_external_location.sh
+++ b/tests/suites/1_stateful/02_query/02_0001_create_table_with_external_location.sh
@@ -8,12 +8,10 @@ echo "drop table if exists table_external_location_with_location_prefix;" | $BENDSQL_CLIENT_CONNECT
 
 ## Create table
 echo "create table table_external_location(a int) 's3://testbucket/admin/data/' connection=(access_key_id ='minioadmin' secret_access_key ='minioadmin' endpoint_url='${STORAGE_S3_ENDPOINT_URL}');" | $BENDSQL_CLIENT_CONNECT
-echo "create table table_external_location_with_location_prefix(a int) 's3://testbucket/admin/data/' connection=(access_key_id ='minioadmin' secret_access_key ='minioadmin' endpoint_url='${STORAGE_S3_ENDPOINT_URL}') location_prefix = 'lulu_';" | $BENDSQL_CLIENT_CONNECT
 
 table_inserts=(
     "insert into table_external_location(a) values(888)"
     "insert into table_external_location(a) values(1024)"
-    "insert into table_external_location_with_location_prefix(a) values(888)"
 )
 
 for i in "${table_inserts[@]}"; do
@@ -22,10 +20,7 @@ done
 
 ## Select table
 echo "select * from table_external_location order by a;" | $BENDSQL_CLIENT_CONNECT
-## select block_location and get part_prefix, block_location like this: 1/1209/_b/lulu_ca5ebf54bf894f4bb1ee232c1a0461a2_v0.parquet
-echo "select block_location from fuse_block('default','table_external_location_with_location_prefix');" | $BENDSQL_CLIENT_CONNECT | cut -d "/" -f 4 | cut -d "_" -f 1
 
 ## Drop table
 echo "drop table if exists table_external_location;" | $BENDSQL_CLIENT_CONNECT
-echo "drop table if exists table_external_location_with_location_prefix;" | $BENDSQL_CLIENT_CONNECT