diff --git a/src/common/config/src/outer_v0.rs b/src/common/config/src/outer_v0.rs index b13c5beec2288..ecc73477c3c91 100644 --- a/src/common/config/src/outer_v0.rs +++ b/src/common/config/src/outer_v0.rs @@ -24,6 +24,7 @@ use common_exception::Result; use common_storage::StorageAzblobConfig as InnerStorageAzblobConfig; use common_storage::StorageConfig as InnerStorageConfig; use common_storage::StorageFsConfig as InnerStorageFsConfig; +use common_storage::StorageGcsConfig as InnerStorageGcsConfig; use common_storage::StorageHdfsConfig as InnerStorageHdfsConfig; use common_storage::StorageParams; use common_storage::StorageS3Config as InnerStorageS3Config; @@ -176,6 +177,10 @@ pub struct StorageConfig { #[clap(flatten)] pub fs: FsStorageConfig, + // GCS backend config + #[clap(flatten)] + pub gcs: GcsStorageConfig, + // S3 storage backend config. #[clap(flatten)] pub s3: S3StorageConfig, @@ -202,6 +207,7 @@ impl From for StorageConfig { storage_type: "".to_string(), allow_insecure: inner.allow_insecure, fs: Default::default(), + gcs: Default::default(), s3: Default::default(), azblob: Default::default(), hdfs: Default::default(), @@ -228,6 +234,10 @@ impl From for StorageConfig { cfg.storage_type = "s3".to_string(); cfg.s3 = v.into() } + StorageParams::Gcs(v) => { + cfg.storage_type = "gcs".to_string(); + cfg.gcs = v.into() + } v => unreachable!("{v:?} should not be used as storage backend"), } @@ -246,6 +256,7 @@ impl TryInto for StorageConfig { match self.storage_type.as_str() { "azblob" => StorageParams::Azblob(self.azblob.try_into()?), "fs" => StorageParams::Fs(self.fs.try_into()?), + "gcs" => StorageParams::Gcs(self.gcs.try_into()?), #[cfg(feature = "storage-hdfs")] "hdfs" => StorageParams::Hdfs(self.hdfs.try_into()?), "memory" => StorageParams::Memory, @@ -289,6 +300,69 @@ impl TryInto for FsStorageConfig { } } +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Args)] +#[serde(default)] +pub struct GcsStorageConfig { + #[clap( + long = 
"storage-gcs-endpoint-url", + default_value = "https://storage.googleapis.com" + )] + #[serde(rename = "endpoint_url")] + pub gcs_endpoint_url: String, + + #[clap(long = "storage-gcs-bucket", default_value_t)] + #[serde(rename = "bucket")] + pub gcs_bucket: String, + + #[clap(long = "storage-gcs-root", default_value_t)] + #[serde(rename = "root")] + pub gcs_root: String, + + #[clap(long = "storage-gcs-credential", default_value_t)] + pub credential: String, +} + +impl Default for GcsStorageConfig { + fn default() -> Self { + InnerStorageGcsConfig::default().into() + } +} + +impl Debug for GcsStorageConfig { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("GcsStorageConfig") + .field("endpoint_url", &self.gcs_endpoint_url) + .field("root", &self.gcs_root) + .field("bucket", &self.gcs_bucket) + .field("credential", &mask_string(&self.credential, 3)) + .finish() + } +} + +impl From for GcsStorageConfig { + fn from(inner: InnerStorageGcsConfig) -> Self { + Self { + gcs_endpoint_url: inner.endpoint_url, + gcs_bucket: inner.bucket, + gcs_root: inner.root, + credential: inner.credential, + } + } +} + +impl TryInto for GcsStorageConfig { + type Error = ErrorCode; + + fn try_into(self) -> std::result::Result { + Ok(InnerStorageGcsConfig { + endpoint_url: self.gcs_endpoint_url, + bucket: self.gcs_bucket, + root: self.gcs_root, + credential: self.credential, + }) + } +} + #[derive(Clone, PartialEq, Serialize, Deserialize, Eq, Args)] #[serde(default)] pub struct S3StorageConfig { diff --git a/src/common/storage/src/config.rs b/src/common/storage/src/config.rs index 034c77c752d89..a3f09916f7d05 100644 --- a/src/common/storage/src/config.rs +++ b/src/common/storage/src/config.rs @@ -35,6 +35,7 @@ pub struct StorageConfig { pub enum StorageParams { Azblob(StorageAzblobConfig), Fs(StorageFsConfig), + Gcs(StorageGcsConfig), #[cfg(feature = "storage-hdfs")] Hdfs(StorageHdfsConfig), Http(StorageHttpConfig), @@ -58,6 +59,11 @@ impl Display for StorageParams { 
v.container, v.root, v.endpoint_url ), StorageParams::Fs(v) => write!(f, "fs://root={}", v.root), + StorageParams::Gcs(v) => write!( + f, + "gcs://bucket={},root={},endpoint={}", + v.bucket, v.root, v.endpoint_url + ), #[cfg(feature = "storage-hdfs")] StorageParams::Hdfs(v) => { write!(f, "hdfs://root={},name_node={}", v.root, v.name_node) @@ -90,6 +96,7 @@ impl StorageParams { StorageParams::Http(v) => v.endpoint_url.starts_with("https://"), StorageParams::Memory => false, StorageParams::S3(v) => v.endpoint_url.starts_with("https://"), + StorageParams::Gcs(v) => v.endpoint_url.starts_with("https://"), } } } @@ -130,6 +137,39 @@ impl Default for StorageFsConfig { } } +pub static STORAGE_GCS_DEFAULT_ENDPOINT: &str = "https://storage.googleapis.com"; + +/// Config for storage backend GCS. +#[derive(Clone, PartialEq, Eq, Deserialize, Serialize)] +pub struct StorageGcsConfig { + pub endpoint_url: String, + pub bucket: String, + pub root: String, + pub credential: String, +} + +impl Default for StorageGcsConfig { + fn default() -> Self { + Self { + endpoint_url: STORAGE_GCS_DEFAULT_ENDPOINT.to_string(), + bucket: String::new(), + root: String::new(), + credential: String::new(), + } + } +} + +impl Debug for StorageGcsConfig { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("StorageGcsConfig") + .field("endpoint", &self.endpoint_url) + .field("bucket", &self.bucket) + .field("root", &self.root) + .field("credential", &mask_string(&self.credential, 3)) + .finish() + } +} + /// Config for storage backend hdfs. 
/// /// # Notes diff --git a/src/common/storage/src/lib.rs b/src/common/storage/src/lib.rs index 2f200e44cd357..bbebf6a629105 100644 --- a/src/common/storage/src/lib.rs +++ b/src/common/storage/src/lib.rs @@ -20,15 +20,18 @@ mod config; pub use config::StorageAzblobConfig; pub use config::StorageConfig; pub use config::StorageFsConfig; +pub use config::StorageGcsConfig; pub use config::StorageHdfsConfig; pub use config::StorageHttpConfig; pub use config::StorageParams; pub use config::StorageS3Config; +pub use config::STORAGE_GCS_DEFAULT_ENDPOINT; pub use config::STORAGE_S3_DEFAULT_ENDPOINT; mod operator; pub use operator::init_azblob_operator; pub use operator::init_fs_operator; +pub use operator::init_gcs_operator; #[cfg(feature = "storage-hdfs")] pub use operator::init_hdfs_operator; pub use operator::init_http_operator; diff --git a/src/common/storage/src/operator.rs b/src/common/storage/src/operator.rs index cfcb9e00373f2..b8d5a17e6cb37 100644 --- a/src/common/storage/src/operator.rs +++ b/src/common/storage/src/operator.rs @@ -17,6 +17,7 @@ use std::io::Result; use opendal::services::azblob; use opendal::services::fs; +use opendal::services::gcs; use opendal::services::http; use opendal::services::memory; use opendal::services::s3; @@ -26,6 +27,7 @@ use super::StorageAzblobConfig; use super::StorageFsConfig; use super::StorageParams; use super::StorageS3Config; +use crate::config::StorageGcsConfig; use crate::config::StorageHttpConfig; /// init_operator will init an opendal operator based on storage config. 
@@ -33,6 +35,7 @@ pub fn init_operator(cfg: &StorageParams) -> Result { Ok(match &cfg { StorageParams::Azblob(cfg) => init_azblob_operator(cfg)?, StorageParams::Fs(cfg) => init_fs_operator(cfg)?, + StorageParams::Gcs(cfg) => init_gcs_operator(cfg)?, #[cfg(feature = "storage-hdfs")] StorageParams::Hdfs(cfg) => init_hdfs_operator(cfg)?, StorageParams::Http(cfg) => init_http_operator(cfg)?, @@ -74,6 +77,20 @@ pub fn init_fs_operator(cfg: &StorageFsConfig) -> Result { Ok(Operator::new(builder.build()?)) } +/// init_gcs_operator will init an opendal gcs operator. +pub fn init_gcs_operator(cfg: &StorageGcsConfig) -> Result { + let mut builder = gcs::Builder::default(); + + let accessor = builder + .endpoint(&cfg.endpoint_url) + .bucket(&cfg.bucket) + .root(&cfg.root) + .credential(&cfg.credential) + .build()?; + + Ok(Operator::new(accessor)) +} + /// init_hdfs_operator will init an opendal hdfs operator. #[cfg(feature = "storage-hdfs")] pub fn init_hdfs_operator(cfg: &super::StorageHdfsConfig) -> Result { diff --git a/src/common/storages/preludes/src/system/configs_table.rs b/src/common/storages/preludes/src/system/configs_table.rs index 1562e6342adf2..cb56edce9462f 100644 --- a/src/common/storages/preludes/src/system/configs_table.rs +++ b/src/common/storages/preludes/src/system/configs_table.rs @@ -88,6 +88,7 @@ impl SyncSystemTable for ConfigsTable { let mut storage_config = config.storage; storage_config.s3.access_key_id = mask_string(&storage_config.s3.access_key_id, 3); storage_config.s3.secret_access_key = mask_string(&storage_config.s3.secret_access_key, 3); + storage_config.gcs.credential = mask_string(&storage_config.gcs.credential, 3); storage_config.azblob.account_name = mask_string(&storage_config.azblob.account_name, 3); storage_config.azblob.account_key = mask_string(&storage_config.azblob.account_key, 3); let storage_config_value = serde_json::to_value(storage_config)?; diff --git a/src/query/service/tests/it/configs.rs 
b/src/query/service/tests/it/configs.rs index cf84f6ee9828d..3e0750f0e7b54 100644 --- a/src/query/service/tests/it/configs.rs +++ b/src/query/service/tests/it/configs.rs @@ -105,6 +105,12 @@ allow_insecure = false [storage.fs] data_path = "_data" +[storage.gcs] +endpoint_url = "https://storage.googleapis.com" +bucket = "" +root = "" +credential = "" + [storage.s3] region = "" endpoint_url = "https://s3.amazonaws.com" @@ -168,6 +174,13 @@ fn test_env_config_s3() -> Result<()> { ("STORAGE_S3_ACCESS_KEY_ID", Some("us.key.id")), ("STORAGE_S3_SECRET_ACCESS_KEY", Some("us.key")), ("STORAGE_S3_BUCKET", Some("us.bucket")), + ( + "STORAGE_GCS_ENDPOINT_URL", + Some("http://gcs.storage.cname_map.local"), + ), + ("STORAGE_GCS_BUCKET", Some("gcs.bucket")), + ("STORAGE_GCS_ROOT", Some("/path/to/root")), + ("STORAGE_GCS_CREDENTIAL", Some("gcs.credential")), ("QUERY_TABLE_ENGINE_MEMORY_ENABLED", Some("true")), ("QUERY_DATABASE_ENGINE_GITHUB_ENABLED", Some("false")), ("CONFIG_FILE", None), @@ -197,6 +210,15 @@ fn test_env_config_s3() -> Result<()> { // config of fs should not be loaded, take default value. assert_eq!("_data", configured.storage.fs.data_path); + // Storage type is s3, gcs related value should be default. 
+ assert_eq!( + "https://storage.googleapis.com", + configured.storage.gcs.gcs_endpoint_url + ); + assert_eq!("", configured.storage.gcs.gcs_bucket); + assert_eq!("", configured.storage.gcs.gcs_root); + assert_eq!("", configured.storage.gcs.credential); + assert_eq!("us.region", configured.storage.s3.region); assert_eq!("http://127.0.0.1:10024", configured.storage.s3.endpoint_url); assert_eq!("us.key.id", configured.storage.s3.access_key_id); @@ -248,6 +270,13 @@ fn test_env_config_fs() -> Result<()> { ("STORAGE_S3_ACCESS_KEY_ID", Some("us.key.id")), ("STORAGE_S3_SECRET_ACCESS_KEY", Some("us.key")), ("STORAGE_S3_BUCKET", Some("us.bucket")), + ( + "STORAGE_GCS_ENDPOINT_URL", + Some("http://gcs.storage.cname_map.local"), + ), + ("STORAGE_GCS_BUCKET", Some("gcs.bucket")), + ("STORAGE_GCS_ROOT", Some("/path/to/root")), + ("STORAGE_GCS_CREDENTIAL", Some("gcs.credential")), ("QUERY_TABLE_ENGINE_MEMORY_ENABLED", Some("true")), ("QUERY_DATABASE_ENGINE_GITHUB_ENABLED", Some("false")), ("CONFIG_FILE", None), @@ -283,6 +312,110 @@ fn test_env_config_fs() -> Result<()> { configured.storage.s3.endpoint_url ); + // Storage type is fs, gcs related value should be default. 
+ assert_eq!( + "https://storage.googleapis.com", + configured.storage.gcs.gcs_endpoint_url + ); + assert_eq!("", configured.storage.gcs.gcs_bucket); + assert_eq!("", configured.storage.gcs.gcs_root); + assert_eq!("", configured.storage.gcs.credential); + + assert!(configured.query.table_engine_memory_enabled); + assert!(!configured.query.database_engine_github_enabled); + + assert!(configured.query.table_cache_enabled); + assert_eq!(512, configured.query.table_memory_cache_mb_size); + assert_eq!("_cache_env", configured.query.table_disk_cache_root); + assert_eq!(512, configured.query.table_disk_cache_mb_size); + }, + ); + + Ok(()) +} + +#[test] +fn test_env_config_gcs() -> Result<()> { + temp_env::with_vars( + vec![ + ("LOG_LEVEL", Some("DEBUG")), + ("QUERY_TENANT_ID", Some("tenant-1")), + ("QUERY_CLUSTER_ID", Some("cluster-1")), + ("QUERY_MYSQL_HANDLER_HOST", Some("127.0.0.1")), + ("QUERY_MYSQL_HANDLER_PORT", Some("3306")), + ("QUERY_MAX_ACTIVE_SESSIONS", Some("255")), + ("QUERY_CLICKHOUSE_HANDLER_HOST", Some("1.2.3.4")), + ("QUERY_CLICKHOUSE_HANDLER_PORT", Some("9000")), + ("QUERY_CLICKHOUSE_HTTP_HANDLER_HOST", Some("1.2.3.4")), + ("QUERY_CLICKHOUSE_HTTP_HANDLER_PORT", Some("8124")), + ("QUERY_HTTP_HANDLER_HOST", Some("1.2.3.4")), + ("QUERY_HTTP_HANDLER_PORT", Some("8001")), + ("QUERY_FLIGHT_API_ADDRESS", Some("1.2.3.4:9091")), + ("QUERY_ADMIN_API_ADDRESS", Some("1.2.3.4:8081")), + ("QUERY_METRIC_API_ADDRESS", Some("1.2.3.4:7071")), + ("QUERY_TABLE_CACHE_ENABLED", Some("true")), + ("QUERY_TABLE_MEMORY_CACHE_MB_SIZE", Some("512")), + ("QUERY_TABLE_DISK_CACHE_ROOT", Some("_cache_env")), + ("QUERY_TABLE_DISK_CACHE_MB_SIZE", Some("512")), + ("STORAGE_TYPE", Some("gcs")), + ("STORAGE_NUM_CPUS", Some("16")), + ("STORAGE_FS_DATA_PATH", Some("/tmp/test")), + ("STORAGE_S3_REGION", Some("us.region")), + ("STORAGE_S3_ENDPOINT_URL", Some("http://127.0.0.1:10024")), + ("STORAGE_S3_ACCESS_KEY_ID", Some("us.key.id")), + ("STORAGE_S3_SECRET_ACCESS_KEY", Some("us.key")), + 
("STORAGE_S3_BUCKET", Some("us.bucket")), + ( + "STORAGE_GCS_ENDPOINT_URL", + Some("http://gcs.storage.cname_map.local"), + ), + ("STORAGE_GCS_BUCKET", Some("gcs.bucket")), + ("STORAGE_GCS_ROOT", Some("/path/to/root")), + ("STORAGE_GCS_CREDENTIAL", Some("gcs.credential")), + ("QUERY_TABLE_ENGINE_MEMORY_ENABLED", Some("true")), + ("QUERY_DATABASE_ENGINE_GITHUB_ENABLED", Some("false")), + ("CONFIG_FILE", None), + ], + || { + let configured = Config::load().expect("must success").into_outer(); + + assert_eq!("DEBUG", configured.log.level); + + assert_eq!("tenant-1", configured.query.tenant_id); + assert_eq!("cluster-1", configured.query.cluster_id); + assert_eq!("127.0.0.1", configured.query.mysql_handler_host); + assert_eq!(3306, configured.query.mysql_handler_port); + assert_eq!(255, configured.query.max_active_sessions); + assert_eq!("1.2.3.4", configured.query.clickhouse_http_handler_host); + assert_eq!(8124, configured.query.clickhouse_http_handler_port); + assert_eq!("1.2.3.4", configured.query.http_handler_host); + assert_eq!(8001, configured.query.http_handler_port); + + assert_eq!("1.2.3.4:9091", configured.query.flight_api_address); + assert_eq!("1.2.3.4:8081", configured.query.admin_api_address); + assert_eq!("1.2.3.4:7071", configured.query.metric_api_address); + + assert_eq!("gcs", configured.storage.storage_type); + assert_eq!(16, configured.storage.storage_num_cpus); + + assert_eq!( + "http://gcs.storage.cname_map.local", + configured.storage.gcs.gcs_endpoint_url + ); + assert_eq!("gcs.bucket", configured.storage.gcs.gcs_bucket); + assert_eq!("/path/to/root", configured.storage.gcs.gcs_root); + assert_eq!("gcs.credential", configured.storage.gcs.credential); + + // Storage type is gcs, fs related value should stay default. + assert_eq!("_data", configured.storage.fs.data_path); + + // Storage type is gcs, s3 related value should be default. 
+ assert_eq!("", configured.storage.s3.region); + assert_eq!( + "https://s3.amazonaws.com", + configured.storage.s3.endpoint_url + ); + assert!(configured.query.table_engine_memory_enabled); assert!(!configured.query.database_engine_github_enabled); diff --git a/src/query/service/tests/it/storages/system/configs_table.rs b/src/query/service/tests/it/storages/system/configs_table.rs index 8bdee48bc90c4..ddb38d079d101 100644 --- a/src/query/service/tests/it/storages/system/configs_table.rs +++ b/src/query/service/tests/it/storages/system/configs_table.rs @@ -43,88 +43,92 @@ async fn test_configs_table() -> Result<()> { assert_eq!(block.num_columns(), 4); let expected = vec![ - "+---------+--------------------------------------+---------------------------+-------------+", - "| group | name | value | description |", - "+---------+--------------------------------------+---------------------------+-------------+", - "| log | dir | ./.databend/logs | |", - "| log | file.dir | ./.databend/logs | |", - "| log | file.level | DEBUG | |", - "| log | file.on | true | |", - "| log | level | DEBUG | |", - "| log | query_enabled | false | |", - "| log | stderr.level | DEBUG | |", - "| log | stderr.on | true | |", - "| meta | address | | |", - "| meta | auto_sync_interval | 10 | |", - "| meta | client_timeout_in_second | 10 | |", - "| meta | embedded_dir | ./.databend/meta_embedded | |", - "| meta | endpoints | | |", - "| meta | password | | |", - "| meta | rpc_tls_meta_server_root_ca_cert | | |", - "| meta | rpc_tls_meta_service_domain_name | localhost | |", - "| meta | username | root | |", - "| query | admin_api_address | 127.0.0.1:8080 | |", - "| query | api_tls_server_cert | | |", - "| query | api_tls_server_key | | |", - "| query | api_tls_server_root_ca_cert | | |", - "| query | async_insert_busy_timeout | 200 | |", - "| query | async_insert_max_data_size | 10000 | |", - "| query | async_insert_stale_timeout | 0 | |", - "| query | clickhouse_handler_host | 127.0.0.1 | |", - 
"| query | clickhouse_handler_port | 9000 | |", - "| query | clickhouse_http_handler_host | 127.0.0.1 | |", - "| query | clickhouse_http_handler_port | 8124 | |", - "| query | cluster_id | | |", - "| query | database_engine_github_enabled | true | |", - "| query | flight_api_address | 127.0.0.1:9090 | |", - "| query | http_handler_host | 127.0.0.1 | |", - "| query | http_handler_port | 8000 | |", - "| query | http_handler_result_timeout_millis | 10000 | |", - "| query | http_handler_tls_server_cert | | |", - "| query | http_handler_tls_server_key | | |", - "| query | http_handler_tls_server_root_ca_cert | | |", - "| query | jwt_key_file | | |", - "| query | management_mode | false | |", - "| query | max_active_sessions | 256 | |", - "| query | max_query_log_size | 10000 | |", - "| query | metric_api_address | 127.0.0.1:7070 | |", - "| query | mysql_handler_host | 127.0.0.1 | |", - "| query | mysql_handler_port | 3307 | |", - "| query | num_cpus | 0 | |", - "| query | rpc_tls_query_server_root_ca_cert | | |", - "| query | rpc_tls_query_service_domain_name | localhost | |", - "| query | rpc_tls_server_cert | | |", - "| query | rpc_tls_server_key | | |", - "| query | table_cache_block_meta_count | 102400 | |", - "| query | table_cache_enabled | false | |", - "| query | table_cache_segment_count | 10240 | |", - "| query | table_cache_snapshot_count | 256 | |", - "| query | table_disk_cache_mb_size | 1024 | |", - "| query | table_disk_cache_root | _cache | |", - "| query | table_engine_memory_enabled | true | |", - "| query | table_memory_cache_mb_size | 256 | |", - "| query | tenant_id | test | |", - "| query | wait_timeout_mills | 5000 | |", - "| storage | allow_insecure | false | |", - "| storage | azblob.account_key | | |", - "| storage | azblob.account_name | | |", - "| storage | azblob.container | | |", - "| storage | azblob.endpoint_url | | |", - "| storage | azblob.root | | |", - "| storage | fs.data_path | _data | |", - "| storage | hdfs.name_node | | |", - "| 
storage | hdfs.root | | |", - "| storage | num_cpus | 0 | |", - "| storage | s3.access_key_id | | |", - "| storage | s3.bucket | | |", - "| storage | s3.enable_virtual_host_style | false | |", - "| storage | s3.endpoint_url | https://s3.amazonaws.com | |", - "| storage | s3.master_key | | |", - "| storage | s3.region | | |", - "| storage | s3.root | | |", - "| storage | s3.secret_access_key | | |", - "| storage | type | fs | |", - "+---------+--------------------------------------+---------------------------+-------------+", + "+---------+--------------------------------------+--------------------------------+-------------+", + "| group | name | value | description |", + "+---------+--------------------------------------+--------------------------------+-------------+", + "| log | dir | ./.databend/logs | |", + "| log | file.dir | ./.databend/logs | |", + "| log | file.level | DEBUG | |", + "| log | file.on | true | |", + "| log | level | DEBUG | |", + "| log | query_enabled | false | |", + "| log | stderr.level | DEBUG | |", + "| log | stderr.on | true | |", + "| meta | address | | |", + "| meta | auto_sync_interval | 10 | |", + "| meta | client_timeout_in_second | 10 | |", + "| meta | embedded_dir | ./.databend/meta_embedded | |", + "| meta | endpoints | | |", + "| meta | password | | |", + "| meta | rpc_tls_meta_server_root_ca_cert | | |", + "| meta | rpc_tls_meta_service_domain_name | localhost | |", + "| meta | username | root | |", + "| query | admin_api_address | 127.0.0.1:8080 | |", + "| query | api_tls_server_cert | | |", + "| query | api_tls_server_key | | |", + "| query | api_tls_server_root_ca_cert | | |", + "| query | async_insert_busy_timeout | 200 | |", + "| query | async_insert_max_data_size | 10000 | |", + "| query | async_insert_stale_timeout | 0 | |", + "| query | clickhouse_handler_host | 127.0.0.1 | |", + "| query | clickhouse_handler_port | 9000 | |", + "| query | clickhouse_http_handler_host | 127.0.0.1 | |", + "| query | 
clickhouse_http_handler_port | 8124 | |", + "| query | cluster_id | | |", + "| query | database_engine_github_enabled | true | |", + "| query | flight_api_address | 127.0.0.1:9090 | |", + "| query | http_handler_host | 127.0.0.1 | |", + "| query | http_handler_port | 8000 | |", + "| query | http_handler_result_timeout_millis | 10000 | |", + "| query | http_handler_tls_server_cert | | |", + "| query | http_handler_tls_server_key | | |", + "| query | http_handler_tls_server_root_ca_cert | | |", + "| query | jwt_key_file | | |", + "| query | management_mode | false | |", + "| query | max_active_sessions | 256 | |", + "| query | max_query_log_size | 10000 | |", + "| query | metric_api_address | 127.0.0.1:7070 | |", + "| query | mysql_handler_host | 127.0.0.1 | |", + "| query | mysql_handler_port | 3307 | |", + "| query | num_cpus | 0 | |", + "| query | rpc_tls_query_server_root_ca_cert | | |", + "| query | rpc_tls_query_service_domain_name | localhost | |", + "| query | rpc_tls_server_cert | | |", + "| query | rpc_tls_server_key | | |", + "| query | table_cache_block_meta_count | 102400 | |", + "| query | table_cache_enabled | false | |", + "| query | table_cache_segment_count | 10240 | |", + "| query | table_cache_snapshot_count | 256 | |", + "| query | table_disk_cache_mb_size | 1024 | |", + "| query | table_disk_cache_root | _cache | |", + "| query | table_engine_memory_enabled | true | |", + "| query | table_memory_cache_mb_size | 256 | |", + "| query | tenant_id | test | |", + "| query | wait_timeout_mills | 5000 | |", + "| storage | allow_insecure | false | |", + "| storage | azblob.account_key | | |", + "| storage | azblob.account_name | | |", + "| storage | azblob.container | | |", + "| storage | azblob.endpoint_url | | |", + "| storage | azblob.root | | |", + "| storage | fs.data_path | _data | |", + "| storage | gcs.bucket | | |", + "| storage | gcs.credential | | |", + "| storage | gcs.endpoint_url | https://storage.googleapis.com | |", + "| storage | 
gcs.root | | |", + "| storage | hdfs.name_node | | |", + "| storage | hdfs.root | | |", + "| storage | num_cpus | 0 | |", + "| storage | s3.access_key_id | | |", + "| storage | s3.bucket | | |", + "| storage | s3.enable_virtual_host_style | false | |", + "| storage | s3.endpoint_url | https://s3.amazonaws.com | |", + "| storage | s3.master_key | | |", + "| storage | s3.region | | |", + "| storage | s3.root | | |", + "| storage | s3.secret_access_key | | |", + "| storage | type | fs | |", + "+---------+--------------------------------------+--------------------------------+-------------+", ]; common_datablocks::assert_blocks_sorted_eq(expected, result.as_slice()); Ok(()) @@ -160,93 +164,97 @@ async fn test_configs_table_redact() -> Result<()> { assert_eq!(block.num_columns(), 4); let endpoint_url_link = format!( - "| storage | s3.endpoint_url | {:<24} | |", + "| storage | s3.endpoint_url | {:<24} | |", mock_server.uri() ); let expected = vec![ - "+---------+--------------------------------------+---------------------------+-------------+", - "| group | name | value | description |", - "+---------+--------------------------------------+---------------------------+-------------+", - "| log | dir | ./.databend/logs | |", - "| log | file.dir | ./.databend/logs | |", - "| log | file.level | DEBUG | |", - "| log | file.on | true | |", - "| log | level | DEBUG | |", - "| log | query_enabled | false | |", - "| log | stderr.level | DEBUG | |", - "| log | stderr.on | true | |", - "| meta | address | | |", - "| meta | auto_sync_interval | 10 | |", - "| meta | client_timeout_in_second | 10 | |", - "| meta | embedded_dir | ./.databend/meta_embedded | |", - "| meta | endpoints | | |", - "| meta | password | | |", - "| meta | rpc_tls_meta_server_root_ca_cert | | |", - "| meta | rpc_tls_meta_service_domain_name | localhost | |", - "| meta | username | root | |", - "| query | admin_api_address | 127.0.0.1:8080 | |", - "| query | api_tls_server_cert | | |", - "| query | 
api_tls_server_key | | |", - "| query | api_tls_server_root_ca_cert | | |", - "| query | async_insert_busy_timeout | 200 | |", - "| query | async_insert_max_data_size | 10000 | |", - "| query | async_insert_stale_timeout | 0 | |", - "| query | clickhouse_handler_host | 127.0.0.1 | |", - "| query | clickhouse_handler_port | 9000 | |", - "| query | clickhouse_http_handler_host | 127.0.0.1 | |", - "| query | clickhouse_http_handler_port | 8124 | |", - "| query | cluster_id | | |", - "| query | database_engine_github_enabled | true | |", - "| query | flight_api_address | 127.0.0.1:9090 | |", - "| query | http_handler_host | 127.0.0.1 | |", - "| query | http_handler_port | 8000 | |", - "| query | http_handler_result_timeout_millis | 10000 | |", - "| query | http_handler_tls_server_cert | | |", - "| query | http_handler_tls_server_key | | |", - "| query | http_handler_tls_server_root_ca_cert | | |", - "| query | jwt_key_file | | |", - "| query | management_mode | false | |", - "| query | max_active_sessions | 256 | |", - "| query | max_query_log_size | 10000 | |", - "| query | metric_api_address | 127.0.0.1:7070 | |", - "| query | mysql_handler_host | 127.0.0.1 | |", - "| query | mysql_handler_port | 3307 | |", - "| query | num_cpus | 0 | |", - "| query | rpc_tls_query_server_root_ca_cert | | |", - "| query | rpc_tls_query_service_domain_name | localhost | |", - "| query | rpc_tls_server_cert | | |", - "| query | rpc_tls_server_key | | |", - "| query | table_cache_block_meta_count | 102400 | |", - "| query | table_cache_enabled | false | |", - "| query | table_cache_segment_count | 10240 | |", - "| query | table_cache_snapshot_count | 256 | |", - "| query | table_disk_cache_mb_size | 1024 | |", - "| query | table_disk_cache_root | _cache | |", - "| query | table_engine_memory_enabled | true | |", - "| query | table_memory_cache_mb_size | 256 | |", - "| query | tenant_id | test | |", - "| query | wait_timeout_mills | 5000 | |", - "| storage | allow_insecure | false | |", 
- "| storage | azblob.account_key | | |", - "| storage | azblob.account_name | | |", - "| storage | azblob.container | | |", - "| storage | azblob.endpoint_url | | |", - "| storage | azblob.root | | |", - "| storage | fs.data_path | _data | |", - "| storage | hdfs.name_node | | |", - "| storage | hdfs.root | | |", - "| storage | num_cpus | 0 | |", - "| storage | s3.access_key_id | ******_id | |", - "| storage | s3.bucket | test | |", - "| storage | s3.enable_virtual_host_style | false | |", + "+---------+--------------------------------------+--------------------------------+-------------+", + "| group | name | value | description |", + "+---------+--------------------------------------+--------------------------------+-------------+", + "| log | dir | ./.databend/logs | |", + "| log | file.dir | ./.databend/logs | |", + "| log | file.level | DEBUG | |", + "| log | file.on | true | |", + "| log | level | DEBUG | |", + "| log | query_enabled | false | |", + "| log | stderr.level | DEBUG | |", + "| log | stderr.on | true | |", + "| meta | address | | |", + "| meta | auto_sync_interval | 10 | |", + "| meta | client_timeout_in_second | 10 | |", + "| meta | embedded_dir | ./.databend/meta_embedded | |", + "| meta | endpoints | | |", + "| meta | password | | |", + "| meta | rpc_tls_meta_server_root_ca_cert | | |", + "| meta | rpc_tls_meta_service_domain_name | localhost | |", + "| meta | username | root | |", + "| query | admin_api_address | 127.0.0.1:8080 | |", + "| query | api_tls_server_cert | | |", + "| query | api_tls_server_key | | |", + "| query | api_tls_server_root_ca_cert | | |", + "| query | async_insert_busy_timeout | 200 | |", + "| query | async_insert_max_data_size | 10000 | |", + "| query | async_insert_stale_timeout | 0 | |", + "| query | clickhouse_handler_host | 127.0.0.1 | |", + "| query | clickhouse_handler_port | 9000 | |", + "| query | clickhouse_http_handler_host | 127.0.0.1 | |", + "| query | clickhouse_http_handler_port | 8124 | |", + "| query | 
cluster_id | | |", + "| query | database_engine_github_enabled | true | |", + "| query | flight_api_address | 127.0.0.1:9090 | |", + "| query | http_handler_host | 127.0.0.1 | |", + "| query | http_handler_port | 8000 | |", + "| query | http_handler_result_timeout_millis | 10000 | |", + "| query | http_handler_tls_server_cert | | |", + "| query | http_handler_tls_server_key | | |", + "| query | http_handler_tls_server_root_ca_cert | | |", + "| query | jwt_key_file | | |", + "| query | management_mode | false | |", + "| query | max_active_sessions | 256 | |", + "| query | max_query_log_size | 10000 | |", + "| query | metric_api_address | 127.0.0.1:7070 | |", + "| query | mysql_handler_host | 127.0.0.1 | |", + "| query | mysql_handler_port | 3307 | |", + "| query | num_cpus | 0 | |", + "| query | rpc_tls_query_server_root_ca_cert | | |", + "| query | rpc_tls_query_service_domain_name | localhost | |", + "| query | rpc_tls_server_cert | | |", + "| query | rpc_tls_server_key | | |", + "| query | table_cache_block_meta_count | 102400 | |", + "| query | table_cache_enabled | false | |", + "| query | table_cache_segment_count | 10240 | |", + "| query | table_cache_snapshot_count | 256 | |", + "| query | table_disk_cache_mb_size | 1024 | |", + "| query | table_disk_cache_root | _cache | |", + "| query | table_engine_memory_enabled | true | |", + "| query | table_memory_cache_mb_size | 256 | |", + "| query | tenant_id | test | |", + "| query | wait_timeout_mills | 5000 | |", + "| storage | allow_insecure | false | |", + "| storage | azblob.account_key | | |", + "| storage | azblob.account_name | | |", + "| storage | azblob.container | | |", + "| storage | azblob.endpoint_url | | |", + "| storage | azblob.root | | |", + "| storage | fs.data_path | _data | |", + "| storage | gcs.bucket | | |", + "| storage | gcs.credential | | |", + "| storage | gcs.endpoint_url | https://storage.googleapis.com | |", + "| storage | gcs.root | | |", + "| storage | hdfs.name_node | | |", + "| 
storage | hdfs.root | | |", + "| storage | num_cpus | 0 | |", + "| storage | s3.access_key_id | ******_id | |", + "| storage | s3.bucket | test | |", + "| storage | s3.enable_virtual_host_style | false | |", &endpoint_url_link, - "| storage | s3.master_key | | |", - "| storage | s3.region | us-east-2 | |", - "| storage | s3.root | | |", - "| storage | s3.secret_access_key | ******key | |", - "| storage | type | s3 | |", - "+---------+--------------------------------------+---------------------------+-------------+", + "| storage | s3.master_key | | |", + "| storage | s3.region | us-east-2 | |", + "| storage | s3.root | | |", + "| storage | s3.secret_access_key | ******key | |", + "| storage | type | s3 | |", + "+---------+--------------------------------------+--------------------------------+-------------+", ]; common_datablocks::assert_blocks_sorted_eq(expected, result.as_slice());