bug: The default file_limit is 48, but expired files are not deleted #16702
Comments
Cannot reproduce in this version.
Init env: deploy a standalone-mode Databend-query & Databend-meta.
Modify the code to set the default limit = 1:
diff --git a/src/common/tracing/src/config.rs b/src/common/tracing/src/config.rs
index 256700ff9b..f010fdceed 100644
--- a/src/common/tracing/src/config.rs
+++ b/src/common/tracing/src/config.rs
@@ -64,8 +64,8 @@ impl Display for FileConfig {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
write!(
f,
- "enabled={}, level={}, dir={}, format={}",
- self.on, self.level, self.dir, self.format
+ "enabled={}, level={}, dir={}, format={}, limit={}, prefix_filter={}",
+ self.on, self.level, self.dir, self.format, self.limit, self.prefix_filter
)
}
}
@@ -77,7 +77,7 @@ impl Default for FileConfig {
level: "INFO".to_string(),
dir: "./.databend/logs".to_string(),
format: "json".to_string(),
- limit: 48,
+ limit: 1,
prefix_filter: "databend_,openraft".to_string(),
}
}
diff --git a/src/common/tracing/src/loggers.rs b/src/common/tracing/src/loggers.rs
index 6285a438fb..21240a23c7 100644
--- a/src/common/tracing/src/loggers.rs
+++ b/src/common/tracing/src/loggers.rs
@@ -33,6 +33,7 @@ pub(crate) fn new_rolling_file_appender(
name: impl ToString,
max_files: usize,
) -> (RollingFile, Box<dyn Send + Sync + 'static>) {
+ println!("max_files: {:?}", max_files);
let rolling = RollingFileWriter::builder()
.rotation(Rotation::Hourly)
.filename_prefix(name.to_string())
diff --git a/src/query/config/src/config.rs b/src/query/config/src/config.rs
index 5cedd0c0d5..2753899878 100644
--- a/src/query/config/src/config.rs
+++ b/src/query/config/src/config.rs
@@ -2080,7 +2080,7 @@ pub struct FileLogConfig {
pub file_format: String,
/// Log file max
- #[clap(long = "log-file-limit", value_name = "VALUE", default_value = "48")]
+ #[clap(long = "log-file-limit", value_name = "VALUE", default_value = "1")]
#[serde(rename = "limit")]
pub file_limit: usize,
@@ -2106,6 +2106,7 @@ impl TryInto<InnerFileLogConfig> for FileLogConfig {
type Error = ErrorCode;
fn try_into(self) -> Result<InnerFileLogConfig> {
+ println!("line 2109 {}", self.file_limit.clone());
Ok(InnerFileLogConfig {
on: self.file_on,
level: self.file_level,
Rebuild and start databend-query. log.file.limit is not set in config.toml, so it keeps the default value. The config file (test.toml):
# Usage:
# databend-query -c databend-query.toml
[query]
max_active_sessions = 256
# Internal flight rpc for cluster communication.
flight_api_address = "0.0.0.0:9091"
# Admin REST API.
admin_api_address = "0.0.0.0:8080"
# Metrics REST API.
metric_api_address = "0.0.0.0:7070"
# Query Handler: MySQL
mysql_handler_host = "0.0.0.0"
mysql_handler_port = 3307
# Query Handler: Clickhouse HTTP
clickhouse_http_handler_host = "0.0.0.0"
clickhouse_http_handler_port = 8124
# Query Handler: HTTP API
http_handler_host = "0.0.0.0"
http_handler_port = 8000
# Query Handler: Experimental Arrow Flight SQL API
flight_sql_handler_host = "0.0.0.0"
flight_sql_handler_port = 8900
tenant_id = "def"
cluster_id = "default"
#management_mode = true
table_engine_memory_enabled = true
enable_udf_server = true
udf_server_allow_list = ['http://0.0.0.0:8815']
#enable_meta_data_upgrade_json_to_pb_from_v307=true
#[[query.udfs]]
#name = "ping"
#definition = "CREATE FUNCTION ping(STRING) RETURNS STRING LANGUAGE python HANDLER = 'ping' ADDRESS = 'http://0.0.0.0:8815'"
[[query.users]]
name = "root"
auth_type = "no_password"
#[log.profile]
#on = true
#dir = "./.databend/profile_1"
[[query.users]]
name = "default"
auth_type = "no_password"
# [[query.users]]
# name = "databend"
# auth_type = "double_sha1_password"
# # echo -n "databend" | sha1sum | cut -d' ' -f1 | xxd -r -p | sha1sum
# auth_string = "3081f32caef285c232d066033c89a78d88a6d8a5"
# [[query.users]]
# name = "datafuselabs"
# auth_type = "sha256_password"
# # echo -n "datafuselabs" | sha256sum
# auth_string = "6db1a2f5da402b43c066fcadcbf78f04260b3236d9035e44dd463f21e29e6f3b"
[log]
[log.file]
level = "INFO"
dir = ".databend/logs"
[meta]
# It is a list of `grpc_api_advertise_host:<grpc-api-port>` of databend-meta config
endpoints = ["0.0.0.0:9191"]
username = "root"
password = "root"
client_timeout_in_second = 60
auto_sync_interval = 60
# Storage config.
[storage]
# fs | s3 | azblob | obs | oss
type = "fs"
# Set a local folder to store your data.
# Comment out this block if you're NOT using local file system as storage.
# To use S3-compatible object storage, uncomment this block and set your values.
# [storage.s3]
# bucket = "<your-bucket-name>"
# endpoint_url = "<your-endpoint>"
# access_key_id = "<your-key-id>"
# secret_access_key = "<your-account-key>"
# enable_virtual_host_style = false
# To use Azure Blob storage, uncomment this block and set your values.
# [storage.azblob]
# endpoint_url = "https://<your-storage-account-name>.blob.core.windows.net"
# container = "<your-azure-storage-container-name>"
# account_name = "<your-storage-account-name>"
# account_key = "<your-account-key>"
# To use OBS object storage, uncomment this block and set your values.
# [storage.obs]
# bucket = "<your-bucket-name>"
# endpoint_url = "<your-endpoint>"
# access_key_id = "<your-key-id>"
# secret_access_key = "<your-account-key>"
# To use OSS object storage, uncomment this block and set your values.
# [storage.oss]
# bucket = "<your-bucket-name>"
# endpoint_url = "<your-endpoint>"
# access_key_id = "<your-key-id>"
# access_key_secret = "<your-account-key>"
# Cache config.
[cache]
# Type of storage to keep the table data cache
#
# available options: [none|disk]
# default is "none", which disables the table data cache
# use "disk" to enable the disk cache
data_cache_storage = "none"
[background]
enable = true
[background.compaction]
enable_compaction = true
compact_mode = "interval"
target_tables = ["default.target1", "default.target2"]
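Note that the [log.file] section above only sets level and dir, so limit should fall back to the struct default (48, or 1 with the patch above). Below is a minimal sketch of that fallback, assuming the serde and toml crates; the types are illustrative, not the actual Databend FileLogConfig:

```rust
// Illustrative only: field names and defaults mirror the diff above,
// but this is not the real Databend config type.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(default)]
struct FileLogConfig {
    level: String,
    dir: String,
    limit: usize,
}

impl Default for FileLogConfig {
    fn default() -> Self {
        Self {
            level: "INFO".to_string(),
            dir: "./.databend/logs".to_string(),
            limit: 48, // 1 after the patch above
        }
    }
}

fn main() {
    // Mirrors the [log.file] section of test.toml: limit is omitted.
    let cfg: FileLogConfig =
        toml::from_str("level = \"INFO\"\ndir = \".databend/logs\"").unwrap();
    println!("{cfg:?}"); // limit comes from Default, not from the file
}
```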
Start:
$ cargo build --bin databend-query && RUST_BACKTRACE=1 RUST_LOG=debug ./target/debug/databend-query -c test.toml --storage-allow-insecure # Version: v1.2.574-nightly-bd380eeeea(rust-1.81.0-nightly-2024-10-29T00:50:06.162961496Z)
$ ./target/debug/databend-meta --single --log-level=ERROR # v1.2.574-nightly-38cfe7686c-simd(1.81.0-nightly-2024-08-21T17:01:45.359166955Z)
Databend-query prints:
The query log file num is 1.
Wait 1 hour. The query log file num is still 1.
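For comparison, here is a minimal sketch of how a max_files cap is typically enforced on rolled log files. This is illustrative code, not Databend's RollingFileWriter, and it assumes the rotated files share a common filename prefix:

```rust
use std::fs;
use std::path::Path;

/// Delete the oldest rolled log files so that at most `max_files` remain.
fn prune_old_logs(dir: &Path, prefix: &str, max_files: usize) -> std::io::Result<()> {
    // Collect the files that belong to this logger.
    let mut files: Vec<_> = fs::read_dir(dir)?
        .filter_map(|entry| entry.ok())
        .filter(|entry| entry.file_name().to_string_lossy().starts_with(prefix))
        .collect();

    if files.len() <= max_files {
        return Ok(());
    }

    // Oldest first by modification time, then remove everything over the cap.
    files.sort_by_key(|entry| entry.metadata().and_then(|m| m.modified()).ok());
    for entry in &files[..files.len() - max_files] {
        fs::remove_file(entry.path())?;
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    // With limit = 1, only the newest query log file should survive each
    // hourly rotation, matching the observation above.
    prune_old_logs(Path::new("./.databend/logs"), "databend-query", 1)
}
```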
Search before asking
Version
main
What's Wrong?
The default log file_limit is 48, but expired files are not deleted.
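One quick way to observe the symptom (a hypothetical check, not part of the original report) is to count the rolled query log files and compare the count against the configured limit:

```rust
// Hypothetical check: count rolled query log files and compare with the limit.
use std::fs;

fn main() -> std::io::Result<()> {
    let limit: usize = 48; // default log.file.limit
    let count = fs::read_dir("./.databend/logs")?
        .filter_map(|entry| entry.ok())
        .filter(|entry| entry.file_name().to_string_lossy().starts_with("databend-query"))
        .count();
    // If expired files were pruned as expected, count would stay <= limit.
    println!("log files: {count}, limit: {limit}, over limit: {}", count > limit);
    Ok(())
}
```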
How to Reproduce?
No response
Are you willing to submit PR?