diff --git a/docker-compose.yml b/docker-compose.yml index 0229f74767..41968d5115 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -52,10 +52,7 @@ services: dockerfile: ./docker/Dockerfile image: cybertecpostgresql/pgwatch3:latest environment: - PW3_PGHOST: postgres - PW3_PGUSER: pgwatch3 - PW3_PGDATABASE: pgwatch3 - PW3_DATASTORE: postgres + PW3_CONFIG: postgresql://pgwatch3@postgres:5432/pgwatch3 PW3_PG_METRIC_STORE_CONN_STR: postgresql://pgwatch3@postgres:5432/pgwatch3_metrics ports: - "8080:8080" @@ -88,9 +85,9 @@ services: -f /tmp/00_helpers/get_table_bloat_approx_sql/12/metric.sql -f /tmp/00_helpers/get_wal_size/10/metric.sql -f /tmp/00_helpers/get_sequences/10/metric.sql - -c "INSERT INTO pgwatch3.monitored_db (md_unique_name, md_preset_config_name, md_config, md_hostname, md_port, md_dbname, md_user, md_password) - SELECT 'test', 'exhaustive', null, 'postgres', '5432', 'pgwatch3', 'pgwatch3', 'pgwatch3admin' - WHERE NOT EXISTS (SELECT * FROM pgwatch3.monitored_db WHERE (md_unique_name, md_hostname, md_dbname) = ('test', 'localhost', 'pgwatch3'))" + -c "INSERT INTO pgwatch3.monitored_db (md_name, md_preset_config_name, md_connstr) + SELECT 'test', 'exhaustive', 'postgresql://pgwatch3:pgwatch3admin@localhost/pgwatch3' + WHERE NOT EXISTS (SELECT * FROM pgwatch3.monitored_db WHERE md_name = 'test')" volumes: - "./src/metrics/sql/00_helpers:/tmp/00_helpers" depends_on: diff --git a/docs/ENV_VARIABLES.md b/docs/ENV_VARIABLES.md index 750cc2c752..08f9238ef6 100644 --- a/docs/ENV_VARIABLES.md +++ b/docs/ENV_VARIABLES.md @@ -9,18 +9,11 @@ Some variables influence multiple components. Command line parameters override e ## Gatherer daemon -- **PW3_PGHOST** Config DB host. Default: localhost -- **PW3_PGPORT** Config DB port. Default: 5432 -- **PW3_PGDATABASE** Config DB name. Default: pgwatch3 -- **PW3_PGUSER** Config DB user. Default: pgwatch3 -- **PW3_PGPASSWORD** Config DB password. Default: pgwatch3admin -- **PW3_PGSSL** Config DB SSL connection only. 
Default: False - **PW3_GROUP** Logical grouping/sharding key to monitor a subset of configured hosts. Default: - -- **PW3_DATASTORE** Backend for metric storage - [postgres|prometheus|json]. Default: postgres -- **PW3_VERBOSE** Logging vebosity. By default warning and errors are logged. Use [-v|-vv] to include [info|debug]. Default: - -- **PW3_PG_METRIC_STORE_CONN_STR** Postgres metric store connection string. Required when PW3_DATASTORE=postgres. Default: - +- **PW3_PG_METRIC_STORE_CONN_STR** Postgres metric store connection string. Default: - +- **PW3_JSON_STORAGE_FILE** File to store metric values. Default: - - **PW3_PG_RETENTION_DAYS** Effective when PW3_DATASTORE=postgres. Default: 14 -- **PW3_CONFIG** File mode. File or folder of YAML (.yaml/.yml) files containing info on which DBs to monitor and where to store metrics +- **PW3_CONFIG** Connection string (`postgresql://user:pwd@host/db`), file or folder of YAML (.yaml/.yml) files containing info on which DBs to monitor and where to store metrics - **PW3_METRICS_FOLDER** File mode. Folder of metrics definitions - **PW3_BATCHING_MAX_DELAY_MS** Max milliseconds to wait for a batched metrics flush. Default: 250 - **PW3_ADHOC_CONN_STR** Ad-hoc mode. Monitor a single Postgres DB / instance specified by a standard Libpq connection string @@ -50,33 +43,6 @@ Some variables influence multiple components. Command line parameters override e - **PW3_NO_HELPER_FUNCTIONS** Ignore metric definitions using helper functions (in form get_smth()) and don't also roll out any helpers automatically. Default: false - **PW3_TRY_CREATE_LISTED_EXTS_IF_MISSING** Try creating the listed extensions (comma sep.) on first connect for all monitored DBs when missing. Main usage - pg_stat_statements. Default: "" - -## Web UI - -- **PW3_WEBHOST** Network interface to listen on. Default: 0.0.0.0 -- **PW3_WEBPORT** Port. 
Default: 8080 -- **PW3_WEBSSL** Use HTTPS with self-signed certificates, Default: False -- **PW3_WEBCERT** Enables use of own certificates for custom deployments. Default: '/pgwatch3/persistent-config/self-signed-ssl.pem' -- **PW3_WEBKEY** Enables use of own certificates for custom deployments. Default: '/pgwatch3/persistent-config/self-signed-ssl.key' -- **PW3_WEBCERTCHAIN** Path to certificate chain file for custom deployments. Default: - -- **PW3_WEBNOANONYMOUS** Require user/password to edit data. Default: False -- **PW3_WEBUSER** Admin login. Default: pgwatch3 -- **PW3_WEBPASSWORD** Admin password. Default: pgwatch3admin -- **PW3_WEBNOCOMPONENTLOGS** Don't expose Docker component logs. Default: False -- **PW3_WEBNOSTATSSUMMARY** Don't expose summary metrics and "top queries" on monitored DBs. Default: False -- **PW3_VERBOSE** Logging vebosity. By default warning and errors are logged. Use [-v|-vv] to include [info|debug]. Default: - -- **PW3_PGHOST** Config DB host. Default: localhost -- **PW3_PGPORT** Config DB port. Default: 5432 -- **PW3_PGDATABASE** Config DB name. Default: pgwatch3 -- **PW3_PGUSER** Config DB user. Default: pgwatch3 -- **PW3_PGPASSWORD** Config DB password. Default: pgwatch3admin -- **PW3_PGSSL** Config DB SSL connection only. Default: False -- **PW3_GRAFANA_BASEURL** For linking to Grafana "Query details" dashboard from "Stat_stmt. overview". Default: http://0.0.0.0:3000 -- **PW3_AES_GCM_KEYPHRASE** Keyphrase for encryption/decpyption of connect string passwords. -- **PW3_AES_GCM_KEYPHRASE_FILE** File containing a keyphrase for encryption/decpyption of connect string passwords. -- **PW3_PG_METRIC_STORE_CONN_STR** Postgres metric store connection string. Required when PW3_DATASTORE=postgres. Default: - - - ## Grafana - **PW3_GRAFANANOANONYMOUS** Can be set to require login even for viewing dashboards. 
Default: - diff --git a/src/api.go b/src/api.go index 973e5f4d48..30f3148e65 100644 --- a/src/api.go +++ b/src/api.go @@ -98,23 +98,20 @@ func (uiapi uiapihandler) GetDatabases() (res string, err error) { // DeleteDatabase removes the database from the list of monitored databases func (uiapi uiapihandler) DeleteDatabase(database string) error { - _, err := configDb.Exec(context.TODO(), "DELETE FROM pgwatch3.monitored_db WHERE md_unique_name = $1", database) + _, err := configDb.Exec(context.TODO(), "DELETE FROM pgwatch3.monitored_db WHERE md_name = $1", database) return err } // AddDatabase adds the database to the list of monitored databases func (uiapi uiapihandler) AddDatabase(params []byte) error { sql := `INSERT INTO pgwatch3.monitored_db( -md_unique_name, md_preset_config_name, md_config, md_hostname, -md_port, md_dbname, md_user, md_password, md_is_superuser, md_is_enabled) -VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)` + md_name, md_connstr, md_preset_config_name, md_config, md_is_superuser, md_is_enabled) + VALUES ($1, $2, $3, $4, $5, $6)` var m map[string]any err := json.Unmarshal(params, &m) if err == nil { - _, err = configDb.Exec(context.TODO(), sql, m["md_unique_name"], m["md_preset_config_name"], - m["md_config"], m["md_hostname"], m["md_port"], - m["md_dbname"], m["md_user"], m["md_password"], - m["md_is_superuser"], m["md_is_enabled"]) + _, err = configDb.Exec(context.TODO(), sql, m["md_name"], m["md_connstr"], + m["md_preset_config_name"], m["md_config"], m["md_is_superuser"], m["md_is_enabled"]) } return err } @@ -137,7 +134,7 @@ func (uiapi uiapihandler) UpdateDatabase(database string, params []byte) error { if err != nil { return err } - sql := fmt.Sprintf(`UPDATE pgwatch3.monitored_db SET %s WHERE md_unique_name = $1`, strings.Join(fields, ",")) + sql := fmt.Sprintf(`UPDATE pgwatch3.monitored_db SET %s WHERE md_name = $1`, strings.Join(fields, ",")) values = append([]any{database}, values...) 
_, err = configDb.Exec(context.TODO(), sql, values...) return err diff --git a/src/config/cmdparser.go b/src/config/cmdparser.go index 1ff7b02202..102b453664 100644 --- a/src/config/cmdparser.go +++ b/src/config/cmdparser.go @@ -3,19 +3,24 @@ package config import ( "os" + "github.com/jackc/pgx/v5" flags "github.com/jessevdk/go-flags" ) +type Kind int + +const ( + ConfigPgURL Kind = iota + ConfigFile + ConfigFolder + ConfigError +) + // ConnectionOpts specifies the database connection options type ConnectionOpts struct { - Host string `long:"host" mapstructure:"host" description:"PG config DB host" default:"localhost" env:"PW3_PGHOST"` - Port string `short:"p" long:"port" mapstructure:"port" description:"PG config DB port" default:"5432" env:"PW3_PGPORT"` - Dbname string `short:"d" long:"dbname" mapstructure:"dbname" description:"PG config DB dbname" default:"pgwatch3" env:"PW3_PGDATABASE"` - User string `short:"u" long:"user" mapstructure:"user" description:"PG config DB user" default:"pgwatch3" env:"PW3_PGUSER"` - Password string `long:"password" mapstructure:"password" description:"PG config DB password" env:"PW3_PGPASSWORD"` - PgRequireSSL bool `long:"pg-require-ssl" mapstructure:"pg-require-ssl" description:"PG config DB SSL connection only" env:"PW3_PGSSL"` + Config string `short:"c" long:"config" mapstructure:"config" description:"File or folder of YAML files containing info on which DBs to monitor and where to store metrics" env:"PW3_CONFIG"` ServersRefreshLoopSeconds int `long:"servers-refresh-loop-seconds" mapstructure:"servers-refresh-loop-seconds" description:"Sleep time for the main loop" env:"PW3_SERVERS_REFRESH_LOOP_SECONDS" default:"120"` - Init bool `long:"init" description:"Initialize database schema to the latest version and exit. Can be used with --upgrade"` + Init bool `long:"init" description:"Initialize configuration database schema to the latest version and exit. 
Can be used with --upgrade"` } // MetricStoreOpts specifies the storage configuration to store metrics data @@ -64,7 +69,6 @@ type CmdOptions struct { Logging LoggingOpts `group:"Logging" mapstructure:"Logging"` WebUI WebUIOpts `group:"WebUI" mapstructure:"WebUI"` Start StartOpts `group:"Start" mapstructure:"Start"` - Config string `short:"c" long:"config" mapstructure:"config" description:"File or folder of YAML files containing info on which DBs to monitor and where to store metrics" env:"PW3_CONFIG"` BatchingDelayMs int64 `long:"batching-delay-ms" mapstructure:"batching-delay-ms" description:"Max milliseconds to wait for a batched metrics flush. [Default: 250]" default:"250" env:"PW3_BATCHING_MAX_DELAY_MS"` AdHocConnString string `long:"adhoc-conn-str" mapstructure:"adhoc-conn-str" description:"Ad-hoc mode: monitor a single Postgres DB specified by a standard Libpq connection string" env:"PW3_ADHOC_CONN_STR"` AdHocDBType string `long:"adhoc-dbtype" mapstructure:"adhoc-dbtype" description:"Ad-hoc mode: postgres|postgres-continuous-discovery" default:"postgres" env:"PW3_ADHOC_DBTYPE"` @@ -99,6 +103,20 @@ func (c CmdOptions) VersionOnly() bool { return len(os.Args) == 2 && c.Version } +func (c CmdOptions) GetConfigKind() (_ Kind, err error) { + if _, err := pgx.ParseConfig(c.Connection.Config); err == nil { + return Kind(ConfigPgURL), nil + } + var fi os.FileInfo + if fi, err = os.Stat(c.Connection.Config); err == nil { + if fi.IsDir() { + return Kind(ConfigFolder), nil + } + return Kind(ConfigFile), nil + } + return Kind(ConfigError), err +} + // NewCmdOptions returns a new instance of CmdOptions with default values func NewCmdOptions(args ...string) *CmdOptions { cmdOpts := new(CmdOptions) diff --git a/src/config/config.go b/src/config/config.go index 724d190975..11eaab665e 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -28,6 +28,11 @@ func checkFolderExistsAndReadable(path string) bool { return err == nil } +const ( + 
defaultMetricsDefinitionPathPkg = "/etc/pgwatch3/metrics" // prebuilt packages / Docker default location + defaultMetricsDefinitionPathDocker = "/pgwatch3/metrics" // prebuilt packages / Docker default location +) + func validateConfig(conf *CmdOptions) error { if conf.Connection.ServersRefreshLoopSeconds <= 1 { return errors.New("--servers-refresh-loop-seconds must be greater than 1") @@ -36,6 +41,10 @@ func validateConfig(conf *CmdOptions) error { return errors.New("--max-parallel-connections-per-db must be >= 1") } + if conf.Metric.MetricsFolder > "" && !checkFolderExistsAndReadable(conf.Metric.MetricsFolder) { + return fmt.Errorf("Could not read --metrics-folder path %s", conf.Metric.MetricsFolder) + } + if err := validateAesGcmConfig(conf); err != nil { return err } @@ -78,14 +87,17 @@ func validateAdHocConfig(conf *CmdOptions) error { if len(conf.AdHocConnString)*len(conf.AdHocConfig) == 0 { return errors.New("--adhoc-conn-str and --adhoc-config params both need to be specified for Ad-hoc mode to work") } - if conf.Config > "" { + if len(conf.Connection.Config) > 0 { return errors.New("Conflicting flags! --adhoc-conn-str and --config cannot be both set") } - if conf.Metric.MetricsFolder > "" && !checkFolderExistsAndReadable(conf.Metric.MetricsFolder) { - return fmt.Errorf("--metrics-folder \"%s\" not readable, trying 1st default paths and then Config DB to fetch metric definitions", conf.Metric.MetricsFolder) - } - if conf.Connection.User > "" && conf.Connection.Password > "" { - return errors.New("Conflicting flags! 
--adhoc-conn-str and --user/--password cannot be both set") + if conf.Metric.MetricsFolder == "" { + if checkFolderExistsAndReadable(defaultMetricsDefinitionPathPkg) { + conf.Metric.MetricsFolder = defaultMetricsDefinitionPathPkg + } else if checkFolderExistsAndReadable(defaultMetricsDefinitionPathDocker) { + conf.Metric.MetricsFolder = defaultMetricsDefinitionPathDocker + } else { + return errors.New("--adhoc-conn-str requires --metrics-folder") + } } if conf.AdHocDBType != DbTypePg && conf.AdHocDBType != DbTypePgCont { return fmt.Errorf("--adhoc-dbtype can be of: [ %s (single DB) | %s (all non-template DB-s on an instance) ]. Default: %s", DbTypePg, DbTypePgCont, DbTypePg) diff --git a/src/database.go b/src/database.go index 603c1405ea..67301ddd95 100644 --- a/src/database.go +++ b/src/database.go @@ -38,12 +38,7 @@ func InitSQLConnPoolForMonitoredDBIfNil(md MonitoredDatabase) error { return nil } - if md.DBType == config.DbTypeBouncer { - md.DBName = "pgbouncer" - } - - conn, err := db.GetPostgresDBConnection(mainContext, md.LibPQConnStr, md.Host, md.Port, md.DBName, md.User, md.Password, - md.SslMode, md.SslRootCAPath, md.SslClientCertPath, md.SslClientKeyPath) + conn, err := db.GetPostgresDBConnection(mainContext, md.ConnStr) if err != nil { return err } @@ -89,7 +84,7 @@ func DBExecRead(ctx context.Context, conn db.PgxIface, sql string, args ...any) return nil, err } -func DBExecReadByDbUniqueName(ctx context.Context, dbUnique string, stmtTimeoutOverride int64, sql string, args ...any) (metrics.MetricData, error) { +func DBExecReadByDbUniqueName(ctx context.Context, dbUnique string, sql string, args ...any) (metrics.MetricData, error) { var conn db.PgxIface var md MonitoredDatabase var data metrics.MetricData @@ -116,19 +111,6 @@ func DBExecReadByDbUniqueName(ctx context.Context, dbUnique string, stmtTimeoutO return nil, err } defer func() { _ = tx.Commit(ctx) }() - if !opts.IsAdHocMode() && IsPostgresDBType(md.DBType) { - stmtTimeout := md.StmtTimeout - if 
stmtTimeoutOverride > 0 { - stmtTimeout = stmtTimeoutOverride - } - if stmtTimeout > 0 { // 0 = don't change, use DB level settings - _, err = tx.Exec(ctx, fmt.Sprintf("SET LOCAL statement_timeout TO '%ds'", stmtTimeout)) - } - if err != nil { - atomic.AddUint64(&totalMetricFetchFailuresCounter, 1) - return nil, err - } - } if IsPostgresDBType(md.DBType) { _, err = tx.Exec(ctx, "SET LOCAL lock_timeout TO '100ms'") if err != nil { @@ -145,12 +127,12 @@ func DBExecReadByDbUniqueName(ctx context.Context, dbUnique string, stmtTimeoutO func GetAllActiveHostsFromConfigDB() (metrics.MetricData, error) { sqlLatest := ` select /* pgwatch3_generated */ - md_unique_name, md_group, md_dbtype, md_hostname, md_port, md_dbname, md_user, coalesce(md_password, '') as md_password, + md_name, md_group, md_dbtype, md_connstr, coalesce(p.pc_config, md_config)::text as md_config, coalesce(s.pc_config, md_config_standby, '{}'::jsonb)::text as md_config_standby, - md_statement_timeout_seconds, md_sslmode, md_is_superuser, + md_is_superuser, coalesce(md_include_pattern, '') as md_include_pattern, coalesce(md_exclude_pattern, '') as md_exclude_pattern, - coalesce(md_custom_tags::text, '{}') as md_custom_tags, md_root_ca_path, md_client_cert_path, md_client_key_path, - md_password_type, coalesce(md_host_config, '{}')::text as md_host_config, md_only_if_master + coalesce(md_custom_tags::text, '{}') as md_custom_tags, + md_encryption, coalesce(md_host_config, '{}')::text as md_host_config, md_only_if_master from pgwatch3.monitored_db left join @@ -177,7 +159,7 @@ func DBGetSizeMB(dbUnique string) (int64, error) { if err != nil || (ver.ExecEnv != execEnvAzureSingle) || (ver.ExecEnv == execEnvAzureSingle && ver.ApproxDBSizeB < 1e12) { logger.Debugf("[%s] determining DB size ...", dbUnique) - data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, 300, sqlDbSize) // can take some time on ancient FS, use 300s stmt timeout + data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, 
sqlDbSize) // can take some time on ancient FS, use 300s stmt timeout if err != nil { logger.Errorf("[%s] failed to determine DB size...cannot apply --min-db-size-mb flag. err: %v ...", dbUnique, err) return 0, err @@ -212,7 +194,7 @@ func TryDiscoverExecutionEnv(dbUnique string) string { 'UNKNOWN' end as exec_env; ` - data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, 0, sqlPGExecEnv) + data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, sqlPGExecEnv) if err != nil { return "" } @@ -228,7 +210,7 @@ func GetDBTotalApproxSize(dbUnique string) (int64, error) { where /* works only for v9.1+*/ c.relpersistence != 't'; ` - data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, 0, sqlApproxDBSize) + data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, sqlApproxDBSize) if err != nil { return 0, err } @@ -274,7 +256,7 @@ func DBGetPGVersion(ctx context.Context, dbUnique string, dbType string, noCache } if dbType == config.DbTypeBouncer { - data, err := DBExecReadByDbUniqueName(ctx, dbUnique, 0, "show version") + data, err := DBExecReadByDbUniqueName(ctx, dbUnique, "show version") if err != nil { return verNew, err } @@ -292,7 +274,7 @@ func DBGetPGVersion(ctx context.Context, dbUnique string, dbType string, noCache verNew.Version = VersionToInt(matches[0]) } } else if dbType == config.DbTypePgPOOL { - data, err := DBExecReadByDbUniqueName(ctx, dbUnique, 0, pgpoolVersion) + data, err := DBExecReadByDbUniqueName(ctx, dbUnique, pgpoolVersion) if err != nil { return verNew, err } @@ -309,7 +291,7 @@ func DBGetPGVersion(ctx context.Context, dbUnique string, dbType string, noCache verNew.Version = VersionToInt(matches[0]) } } else { - data, err := DBExecReadByDbUniqueName(ctx, dbUnique, 0, sql) + data, err := DBExecReadByDbUniqueName(ctx, dbUnique, sql) if err != nil { if noCache { return ver, err @@ -325,7 +307,7 @@ func DBGetPGVersion(ctx context.Context, dbUnique string, dbType string, noCache if verNew.Version > VersionToInt("10.0") && 
opts.Metric.SystemIdentifierField > "" { logger.Debugf("[%s] determining system identifier version (pg ver: %v)", dbUnique, verNew.VersionStr) - data, err := DBExecReadByDbUniqueName(ctx, dbUnique, 0, sqlSysid) + data, err := DBExecReadByDbUniqueName(ctx, dbUnique, sqlSysid) if err == nil && len(data) > 0 { verNew.SystemIdentifier = data[0]["system_identifier"].(string) } @@ -353,7 +335,7 @@ func DBGetPGVersion(ctx context.Context, dbUnique string, dbType string, noCache } logger.Debugf("[%s] determining if monitoring user is a superuser...", dbUnique) - data, err = DBExecReadByDbUniqueName(ctx, dbUnique, 0, sqlSu) + data, err = DBExecReadByDbUniqueName(ctx, dbUnique, sqlSu) if err == nil { verNew.IsSuperuser = data[0]["rolsuper"].(bool) } @@ -361,7 +343,7 @@ func DBGetPGVersion(ctx context.Context, dbUnique string, dbType string, noCache if verNew.Version >= MinExtensionInfoAvailable { //log.Debugf("[%s] determining installed extensions info...", dbUnique) - data, err = DBExecReadByDbUniqueName(mainContext, dbUnique, 0, sqlExtensions) + data, err = DBExecReadByDbUniqueName(mainContext, dbUnique, sqlExtensions) if err != nil { logger.Errorf("[%s] failed to determine installed extensions info: %v", dbUnique, err) } else { @@ -403,7 +385,7 @@ func DetectSprocChanges(dbUnique string, vme DBVersionMapEntry, storageCh chan<- return changeCounts } - data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, mvp.MetricAttrs.StatementTimeoutSeconds, mvp.SQL) + data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, mvp.SQL) if err != nil { logger.Error("could not read sproc_hashes from monitored host: ", dbUnique, ", err:", err) return changeCounts @@ -464,7 +446,7 @@ func DetectSprocChanges(dbUnique string, vme DBVersionMapEntry, storageCh chan<- logger.Debugf("[%s][%s] detected %d sproc changes", dbUnique, specialMetricChangeEvents, len(detectedChanges)) if len(detectedChanges) > 0 { md, _ := GetMonitoredDatabaseByUniqueName(dbUnique) - storageCh <- 
[]metrics.MetricStoreMessage{{DBUniqueName: dbUnique, MetricName: "sproc_changes", Data: detectedChanges, CustomTags: md.CustomTags}} + storageCh <- []metrics.MetricStoreMessage{{DBName: dbUnique, MetricName: "sproc_changes", Data: detectedChanges, CustomTags: md.CustomTags}} } return changeCounts @@ -487,7 +469,7 @@ func DetectTableChanges(dbUnique string, vme DBVersionMapEntry, storageCh chan<- return changeCounts } - data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, mvp.MetricAttrs.StatementTimeoutSeconds, mvp.SQL) + data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, mvp.SQL) if err != nil { logger.Error("could not read table_hashes from monitored host:", dbUnique, ", err:", err) return changeCounts @@ -548,7 +530,7 @@ func DetectTableChanges(dbUnique string, vme DBVersionMapEntry, storageCh chan<- logger.Debugf("[%s][%s] detected %d table changes", dbUnique, specialMetricChangeEvents, len(detectedChanges)) if len(detectedChanges) > 0 { md, _ := GetMonitoredDatabaseByUniqueName(dbUnique) - storageCh <- []metrics.MetricStoreMessage{{DBUniqueName: dbUnique, MetricName: "table_changes", Data: detectedChanges, CustomTags: md.CustomTags}} + storageCh <- []metrics.MetricStoreMessage{{DBName: dbUnique, MetricName: "table_changes", Data: detectedChanges, CustomTags: md.CustomTags}} } return changeCounts @@ -571,7 +553,7 @@ func DetectIndexChanges(dbUnique string, vme DBVersionMapEntry, storageCh chan<- return changeCounts } - data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, mvp.MetricAttrs.StatementTimeoutSeconds, mvp.SQL) + data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, mvp.SQL) if err != nil { logger.Error("could not read index_hashes from monitored host:", dbUnique, ", err:", err) return changeCounts @@ -630,7 +612,7 @@ func DetectIndexChanges(dbUnique string, vme DBVersionMapEntry, storageCh chan<- logger.Debugf("[%s][%s] detected %d index changes", dbUnique, specialMetricChangeEvents, len(detectedChanges)) if 
len(detectedChanges) > 0 { md, _ := GetMonitoredDatabaseByUniqueName(dbUnique) - storageCh <- []metrics.MetricStoreMessage{{DBUniqueName: dbUnique, MetricName: "index_changes", Data: detectedChanges, CustomTags: md.CustomTags}} + storageCh <- []metrics.MetricStoreMessage{{DBName: dbUnique, MetricName: "index_changes", Data: detectedChanges, CustomTags: md.CustomTags}} } return changeCounts @@ -654,7 +636,7 @@ func DetectPrivilegeChanges(dbUnique string, vme DBVersionMapEntry, storageCh ch } // returns rows of: object_type, tag_role, tag_object, privilege_type - data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, mvp.MetricAttrs.StatementTimeoutSeconds, mvp.SQL) + data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, mvp.SQL) if err != nil { logger.Errorf("[%s][%s] failed to fetch object privileges info: %v", dbUnique, specialMetricChangeEvents, err) return changeCounts @@ -708,10 +690,10 @@ func DetectPrivilegeChanges(dbUnique string, vme DBVersionMapEntry, storageCh ch md, _ := GetMonitoredDatabaseByUniqueName(dbUnique) storageCh <- []metrics.MetricStoreMessage{ { - DBUniqueName: dbUnique, - MetricName: "privilege_changes", - Data: detectedChanges, - CustomTags: md.CustomTags, + DBName: dbUnique, + MetricName: "privilege_changes", + Data: detectedChanges, + CustomTags: md.CustomTags, }} } @@ -735,7 +717,7 @@ func DetectConfigurationChanges(dbUnique string, vme DBVersionMapEntry, storageC return changeCounts } - data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, mvp.MetricAttrs.StatementTimeoutSeconds, mvp.SQL) + data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, mvp.SQL) if err != nil { logger.Errorf("[%s][%s] could not read configuration_hashes from monitored host: %v", dbUnique, specialMetricChangeEvents, err) return changeCounts @@ -772,10 +754,10 @@ func DetectConfigurationChanges(dbUnique string, vme DBVersionMapEntry, storageC if len(detectedChanges) > 0 { md, _ := GetMonitoredDatabaseByUniqueName(dbUnique) storageCh <- 
[]metrics.MetricStoreMessage{{ - DBUniqueName: dbUnique, - MetricName: "configuration_changes", - Data: detectedChanges, - CustomTags: md.CustomTags, + DBName: dbUnique, + MetricName: "configuration_changes", + Data: detectedChanges, + CustomTags: md.CustomTags, }} } @@ -816,7 +798,7 @@ func CheckForPGObjectChangesAndStore(dbUnique string, vme DBVersionMapEntry, sto influxEntry["epoch_ns"] = time.Now().UnixNano() detectedChangesSummary = append(detectedChangesSummary, influxEntry) md, _ := GetMonitoredDatabaseByUniqueName(dbUnique) - storageCh <- []metrics.MetricStoreMessage{{DBUniqueName: dbUnique, + storageCh <- []metrics.MetricStoreMessage{{DBName: dbUnique, DBType: md.DBType, MetricName: "object_changes", Data: detectedChangesSummary, @@ -835,7 +817,7 @@ func FetchMetricsPgpool(msg MetricFetchMessage, _ DBVersionMapEntry, mvp metrics for _, sql := range sqlLines { if strings.HasPrefix(sql, "SHOW POOL_NODES") { - data, err := DBExecReadByDbUniqueName(mainContext, msg.DBUniqueName, 0, sql) + data, err := DBExecReadByDbUniqueName(mainContext, msg.DBUniqueName, sql) if err != nil { logger.Errorf("[%s][%s] Could not fetch PgPool statistics: %v", msg.DBUniqueName, msg.MetricName, err) return data, err @@ -889,7 +871,7 @@ func FetchMetricsPgpool(msg MetricFetchMessage, _ DBVersionMapEntry, mvp metrics continue } - data, err := DBExecReadByDbUniqueName(mainContext, msg.DBUniqueName, 0, sql) + data, err := DBExecReadByDbUniqueName(mainContext, msg.DBUniqueName, sql) if err != nil { logger.Errorf("[%s][%s] Could not fetch PgPool statistics: %v", msg.DBUniqueName, msg.MetricName, err) continue @@ -922,7 +904,7 @@ func FetchMetricsPgpool(msg MetricFetchMessage, _ DBVersionMapEntry, mvp metrics func DoesFunctionExists(dbUnique, functionName string) bool { logger.Debug("Checking for function existence", dbUnique, functionName) sql := fmt.Sprintf("select /* pgwatch3_generated */ 1 from pg_proc join pg_namespace n on pronamespace = n.oid where proname = '%s' and n.nspname = 
'public'", functionName) - data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, 0, sql) + data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, sql) if err != nil { logger.Error("Failed to check for function existence", dbUnique, functionName, err) return false @@ -942,7 +924,7 @@ func TryCreateMissingExtensions(dbUnique string, extensionNames []string, existi extsCreated := make([]string, 0) // For security reasons don't allow to execute random strings but check that it's an existing extension - data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, 0, sqlAvailable) + data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, sqlAvailable) if err != nil { logger.Infof("[%s] Failed to get a list of available extensions: %v", dbUnique, err) return extsCreated @@ -962,7 +944,7 @@ func TryCreateMissingExtensions(dbUnique string, extensionNames []string, existi logger.Errorf("[%s] Requested extension %s not available on instance, cannot try to create...", dbUnique, extToCreate) } else { sqlCreateExt := `create extension ` + extToCreate - _, err := DBExecReadByDbUniqueName(mainContext, dbUnique, 0, sqlCreateExt) + _, err := DBExecReadByDbUniqueName(mainContext, dbUnique, sqlCreateExt) if err != nil { logger.Errorf("[%s] Failed to create extension %s (based on --try-create-listed-exts-if-missing input): %v", dbUnique, extToCreate, err) } @@ -1002,7 +984,7 @@ func TryCreateMetricsFetchingHelpers(dbUnique string) error { logger.Warning("Could not find query text for", dbUnique, helperName) continue } - _, err = DBExecReadByDbUniqueName(mainContext, dbUnique, 0, mvp.SQL) + _, err = DBExecReadByDbUniqueName(mainContext, dbUnique, mvp.SQL) if err != nil { logger.Warning("Failed to create a metric fetching helper for", dbUnique, helperName) logger.Warning(err) @@ -1034,7 +1016,7 @@ func TryCreateMetricsFetchingHelpers(dbUnique string) error { logger.Warning("Could not find query text for", dbUnique, metric) continue } - _, err = 
DBExecReadByDbUniqueName(mainContext, dbUnique, 0, mvp.SQL) + _, err = DBExecReadByDbUniqueName(mainContext, dbUnique, mvp.SQL) if err != nil { logger.Warning("Failed to create a metric fetching helper for", dbUnique, metric) logger.Warning(err) @@ -1053,23 +1035,9 @@ func ResolveDatabasesFromConfigEntry(ce MonitoredDatabase) ([]MonitoredDatabase, var err error md := make([]MonitoredDatabase, 0) - // some cloud providers limit access to template1 for some reason, so try with postgres and defaultdb (Aiven) - templateDBsToTry := []string{"template1", "postgres", "defaultdb"} - - for _, templateDB := range templateDBsToTry { - c, err = db.GetPostgresDBConnection(mainContext, ce.LibPQConnStr, ce.Host, ce.Port, templateDB, ce.User, ce.Password, - ce.SslMode, ce.SslRootCAPath, ce.SslClientCertPath, ce.SslClientKeyPath) - if err != nil { - return md, err - } - err = c.Ping(mainContext) - if err == nil { - break - } - c.Close() - } + c, err = db.GetPostgresDBConnection(mainContext, ce.ConnStr) if err != nil { - return md, fmt.Errorf("Failed to connect to any of the template DBs: %v", templateDBsToTry) + return md, err } defer c.Close() @@ -1091,17 +1059,8 @@ func ResolveDatabasesFromConfigEntry(ce MonitoredDatabase) ([]MonitoredDatabase, md = append(md, MonitoredDatabase{ DBUniqueName: ce.DBUniqueName + "_" + d["datname_escaped"].(string), DBUniqueNameOrig: ce.DBUniqueName, - DBName: d["datname"].(string), - Host: ce.Host, - Port: ce.Port, - User: ce.User, - Password: ce.Password, - PasswordType: ce.PasswordType, - SslMode: ce.SslMode, - SslRootCAPath: ce.SslRootCAPath, - SslClientCertPath: ce.SslClientCertPath, - SslClientKeyPath: ce.SslClientKeyPath, - StmtTimeout: ce.StmtTimeout, + ConnStr: ce.ConnStr, + Encryption: ce.Encryption, Metrics: ce.Metrics, MetricsStandby: ce.MetricsStandby, PresetMetrics: ce.PresetMetrics, @@ -1120,12 +1079,12 @@ func ResolveDatabasesFromConfigEntry(ce MonitoredDatabase) ([]MonitoredDatabase, func GetGoPsutilDiskPG(dbUnique string) 
(metrics.MetricData, error) { sql := `select current_setting('data_directory') as dd, current_setting('log_directory') as ld, current_setting('server_version_num')::int as pgver` sqlTS := `select spcname::text as name, pg_catalog.pg_tablespace_location(oid) as location from pg_catalog.pg_tablespace where not spcname like any(array[E'pg\\_%'])` - data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, 0, sql) + data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, sql) if err != nil || len(data) == 0 { logger.Errorf("Failed to determine relevant PG disk paths via SQL: %v", err) return nil, err } - dataTblsp, err := DBExecReadByDbUniqueName(mainContext, dbUnique, 0, sqlTS) + dataTblsp, err := DBExecReadByDbUniqueName(mainContext, dbUnique, sqlTS) if err != nil { logger.Infof("Failed to determine relevant PG tablespace paths via SQL: %v", err) } diff --git a/src/db/bootstrap.go b/src/db/bootstrap.go index a67b6277ab..9104224bde 100644 --- a/src/db/bootstrap.go +++ b/src/db/bootstrap.go @@ -2,8 +2,6 @@ package db import ( "context" - "fmt" - "strings" "time" "github.com/cybertec-postgresql/pgwatch3/log" @@ -26,46 +24,21 @@ func TryDatabaseConnection(ctx context.Context, connStr string) error { return err } -func GetPostgresDBConnection(ctx context.Context, libPqConnString, host, port, dbname, user, password, sslmode, sslrootcert, sslcert, sslkey string) (PgxPoolIface, error) { - var connStr string - - //log.Debug("Connecting to: ", host, port, dbname, user, password) - if len(libPqConnString) > 0 { - connStr = libPqConnString - if !strings.Contains(strings.ToLower(connStr), "sslmode") { - if strings.Contains(connStr, "postgresql://") || strings.Contains(connStr, "postgres://") { // JDBC style - if strings.Contains(connStr, "?") { // has some extra params already - connStr += "&sslmode=disable" // defaulting to "disable" as Go driver doesn't support "prefer" - } else { - connStr += "?sslmode=disable" - } - } else { // LibPQ style - connStr += " 
sslmode=disable" - } - } - if !strings.Contains(strings.ToLower(connStr), "connect_timeout") { - if strings.Contains(connStr, "postgresql://") || strings.Contains(connStr, "postgres://") { // JDBC style - if strings.Contains(connStr, "?") { // has some extra params already - connStr += "&connect_timeout=5" // 5 seconds - } else { - connStr += "?connect_timeout=5" - } - } else { // LibPQ style - connStr += " connect_timeout=5" - } - } - } else { - connStr = fmt.Sprintf("host=%s port=%s dbname='%s' sslmode=%s user=%s application_name=%s sslrootcert='%s' sslcert='%s' sslkey='%s' connect_timeout=5", - host, port, dbname, sslmode, user, applicationName, sslrootcert, sslcert, sslkey) - if password != "" { // having empty string as password effectively disables .pgpass so include only if password given - connStr += fmt.Sprintf(" password='%s'", password) - } - } +type ConnConfigCallback = func(*pgx.ConnConfig) error +func GetPostgresDBConnection(ctx context.Context, connStr string, callbacks ...ConnConfigCallback) (PgxPoolIface, error) { connConfig, err := pgxpool.ParseConfig(connStr) if err != nil { return nil, err } + for _, f := range callbacks { + if err = f(connConfig.ConnConfig); err != nil { + return nil, err + } + } + if connConfig.ConnConfig.ConnectTimeout == 0 { + connConfig.ConnConfig.ConnectTimeout = time.Second * 5 + } connConfig.MaxConnIdleTime = 15 * time.Second connConfig.MaxConnLifetime = pgConnRecycleSeconds * time.Second tracelogger := &tracelog.TraceLog{ @@ -78,11 +51,10 @@ func GetPostgresDBConnection(ctx context.Context, libPqConnString, host, port, d var backoff = retry.WithMaxRetries(3, retry.NewConstant(1*time.Second)) -func InitAndTestConfigStoreConnection(ctx context.Context, host, port, dbname, user, password string, requireSSL bool) (configDb PgxPoolIface, err error) { +func InitAndTestConfigStoreConnection(ctx context.Context, connStr string) (configDb PgxPoolIface, err error) { logger := log.GetLogger(ctx) - SSLMode := map[bool]string{false: 
"disable", true: "require"}[requireSSL] if err = retry.Do(ctx, backoff, func(ctx context.Context) error { - if configDb, err = GetPostgresDBConnection(ctx, "", host, port, dbname, user, password, SSLMode, "", "", ""); err == nil { + if configDb, err = GetPostgresDBConnection(ctx, connStr); err == nil { err = configDb.Ping(ctx) } if err != nil { @@ -101,7 +73,7 @@ func InitAndTestConfigStoreConnection(ctx context.Context, host, port, dbname, u func InitAndTestMetricStoreConnection(ctx context.Context, connStr string) (metricDb PgxPoolIface, err error) { logger := log.GetLogger(ctx) if err = retry.Do(ctx, backoff, func(ctx context.Context) error { - if metricDb, err = GetPostgresDBConnection(ctx, connStr, "", "", "", "", "", "", "", "", ""); err == nil { + if metricDb, err = GetPostgresDBConnection(ctx, connStr); err == nil { err = metricDb.Ping(ctx) } if err != nil { diff --git a/src/db/sql/config/config_schema.sql b/src/db/sql/config/config_schema.sql index 8747d2d968..da18c92af1 100644 --- a/src/db/sql/config/config_schema.sql +++ b/src/db/sql/config/config_schema.sql @@ -17,46 +17,30 @@ create table if not exists pgwatch3.preset_config ( -- drop table if exists pgwatch3.monitored_db; create table if not exists pgwatch3.monitored_db ( - md_id serial not null primary key, - md_unique_name text not null, - md_hostname text not null, - md_port text not null default 5432, - md_dbname text not null, - md_user text not null, - md_password text, - md_is_superuser boolean not null default false, - md_sslmode text not null default 'disable', -- set to 'require' for to force SSL - md_preset_config_name text references pgwatch3.preset_config(pc_name) default 'basic', - md_config jsonb, - md_is_enabled boolean not null default 't', - md_last_modified_on timestamptz not null default now(), - md_statement_timeout_seconds int not null default 5, -- metrics queries will be canceled after so many seconds - md_dbtype text not null default 'postgres', - md_include_pattern text, -- 
valid regex expected. relevant for 'postgres-continuous-discovery' - md_exclude_pattern text, -- valid regex expected. relevant for 'postgres-continuous-discovery' - md_custom_tags jsonb, - md_group text not null default 'default', - md_root_ca_path text not null default '', -- relevant for 'verify-ca', 'verify-full' - md_client_cert_path text not null default '', -- relevant for 'verify-full' - md_client_key_path text not null default '', -- relevant for 'verify-full' - md_password_type text not null default 'plain-text', - md_host_config jsonb, - md_only_if_master bool not null default false, - md_preset_config_name_standby text references pgwatch3.preset_config(pc_name), - md_config_standby jsonb, + md_name text not null primary key, + md_connstr text not null, + md_is_superuser boolean not null default false, + md_preset_config_name text references pgwatch3.preset_config(pc_name) default 'basic', + md_config jsonb, + md_is_enabled boolean not null default 't', + md_last_modified_on timestamptz not null default now(), + md_dbtype text not null default 'postgres', + md_include_pattern text, -- valid regex expected. relevant for 'postgres-continuous-discovery' + md_exclude_pattern text, -- valid regex expected. 
relevant for 'postgres-continuous-discovery' + md_custom_tags jsonb, + md_group text not null default 'default', + md_encryption text not null default 'plain-text', + md_host_config jsonb, + md_only_if_master bool not null default false, + md_preset_config_name_standby text references pgwatch3.preset_config(pc_name), + md_config_standby jsonb, - UNIQUE (md_unique_name), - CONSTRAINT no_colon_on_unique_name CHECK (md_unique_name !~ ':'), - CHECK (md_sslmode in ('disable', 'require', 'verify-ca', 'verify-full')), + CONSTRAINT no_colon_on_unique_name CHECK (md_name !~ ':'), CHECK (md_dbtype in ('postgres', 'pgbouncer', 'postgres-continuous-discovery', 'patroni', 'patroni-continuous-discovery', 'patroni-namespace-discovery', 'pgpool')), CHECK (md_group ~ E'\\w+'), - CHECK (md_password_type in ('plain-text', 'aes-gcm-256')) + CHECK (md_encryption in ('plain-text', 'aes-gcm-256')) ); --- prevent multiple active workers for the same db -create unique index if not exists monitored_db_md_hostname_md_port_md_dbname_md_is_enabled_idx on monitored_db(md_hostname, md_port, md_dbname, md_is_enabled) where not md_dbtype ~ 'patroni'; - - alter table pgwatch3.monitored_db add constraint preset_or_custom_config check ((not (md_preset_config_name is null and md_config is null)) and not (md_preset_config_name is not null and md_config is not null)), @@ -87,7 +71,7 @@ create table if not exists metric ( create table if not exists metric_attribute ( ma_metric_name text not null primary key, ma_last_modified_on timestamptz not null default now(), - ma_metric_attrs jsonb not null, + ma_metric_attrs jsonb not null, check (ma_metric_name ~ E'^[a-z0-9_\\.]+$') ); @@ -409,5 +393,5 @@ insert into pgwatch3.preset_config (pc_name, pc_description, pc_config) }'); /* one host for demo purposes, so that "docker run" could immediately show some graphs */ ---insert into pgwatch3.monitored_db (md_unique_name, md_preset_config_name, md_config, md_hostname, md_port, md_dbname, md_user, md_password) 
+--insert into pgwatch3.monitored_db (md_name, md_preset_config_name, md_config, md_hostname, md_port, md_dbname, md_user, md_password) -- values ('test', 'exhaustive', null, 'localhost', '5432', 'pgwatch3', 'pgwatch3', 'pgwatch3admin'); diff --git a/src/logparse.go b/src/logparse.go index 881be39696..bf755b8a33 100644 --- a/src/logparse.go +++ b/src/logparse.go @@ -101,7 +101,7 @@ func eventCountsToMetricStoreMessages(eventCounts, eventCountsTotal map[string]i } } allSeverityCounts["epoch_ns"] = time.Now().UnixNano() - return []metrics.MetricStoreMessage{{DBUniqueName: mdb.DBUniqueName, DBType: mdb.DBType, + return []metrics.MetricStoreMessage{{DBName: mdb.DBUniqueName, DBType: mdb.DBType, MetricName: specialMetricServerLogEventCounts, Data: metrics.MetricData{allSeverityCounts}, CustomTags: mdb.CustomTags}} } @@ -368,7 +368,7 @@ func tryDetermineLogFolder(mdb MonitoredDatabase) string { sql := `select current_setting('data_directory') as dd, current_setting('log_directory') as ld` logger.Infof("[%s] Trying to determine server logs folder via SQL as host_config.logs_glob_path not specified...", mdb.DBUniqueName) - data, err := DBExecReadByDbUniqueName(mainContext, mdb.DBUniqueName, 0, sql) + data, err := DBExecReadByDbUniqueName(mainContext, mdb.DBUniqueName, sql) if err != nil { logger.Errorf("[%s] Failed to query data_directory and log_directory settings...are you superuser or have pg_monitor grant?", mdb.DBUniqueName) return "" @@ -386,7 +386,7 @@ func tryDetermineLogMessagesLanguage(mdb MonitoredDatabase) string { sql := `select current_setting('lc_messages')::varchar(2) as lc_messages;` logger.Debugf("[%s] Trying to determine server log messages language...", mdb.DBUniqueName) - data, err := DBExecReadByDbUniqueName(mainContext, mdb.DBUniqueName, 0, sql) + data, err := DBExecReadByDbUniqueName(mainContext, mdb.DBUniqueName, sql) if err != nil { logger.Errorf("[%s] Failed to lc_messages settings: %s", mdb.DBUniqueName, err) return "" diff --git a/src/main.go 
b/src/main.go index f6b5f73acc..ad450ab723 100644 --- a/src/main.go +++ b/src/main.go @@ -36,6 +36,7 @@ import ( "github.com/cybertec-postgresql/pgwatch3/metrics/sinks" "github.com/cybertec-postgresql/pgwatch3/psutil" "github.com/cybertec-postgresql/pgwatch3/webserver" + "github.com/jackc/pgx/v5" "github.com/shopspring/decimal" "github.com/sirupsen/logrus" @@ -48,20 +49,10 @@ type MonitoredDatabase struct { DBUniqueName string `yaml:"unique_name"` DBUniqueNameOrig string // to preserve belonging to a specific instance for continuous modes where DBUniqueName will be dynamic Group string - Host string - Port string - DBName string - User string - Password string - PasswordType string `yaml:"password_type"` - LibPQConnStr string `yaml:"libpq_conn_str"` - SslMode string - SslRootCAPath string `yaml:"sslrootcert"` - SslClientCertPath string `yaml:"sslcert"` - SslClientKeyPath string `yaml:"sslkey"` + Encryption string `yaml:"encryption"` + ConnStr string `yaml:"conn_str"` Metrics map[string]float64 `yaml:"custom_metrics"` MetricsStandby map[string]float64 `yaml:"custom_metrics_standby"` - StmtTimeout int64 `yaml:"stmt_timeout"` DBType string DBNameIncludePattern string `yaml:"dbname_include_pattern"` DBNameExcludePattern string `yaml:"dbname_exclude_pattern"` @@ -74,6 +65,13 @@ type MonitoredDatabase struct { OnlyIfMaster bool `yaml:"only_if_master"` } +func (md MonitoredDatabase) GetDatabaseName() string { + if conf, err := pgx.ParseConfig(md.ConnStr); err == nil { + return conf.Database + } + return "" +} + type HostConfigAttrs struct { DcsType string `yaml:"dcs_type"` DcsEndpoints []string `yaml:"dcs_endpoints"` @@ -177,14 +175,13 @@ const ( metricPsutilDisk = "psutil_disk" metricPsutilDiskIoTotal = "psutil_disk_io_total" metricPsutilMem = "psutil_mem" - defaultMetricsDefinitionPathPkg = "/etc/pgwatch3/metrics" // prebuilt packages / Docker default location - defaultMetricsDefinitionPathDocker = "/pgwatch3/metrics" // prebuilt packages / Docker default location - 
dbSizeCachingInterval = 30 * time.Minute - dbMetricJoinStr = "¤¤¤" // just some unlikely string for a DB name to avoid using maps of maps for DB+metric data - execEnvUnknown = "UNKNOWN" - execEnvAzureSingle = "AZURE_SINGLE" - execEnvAzureFlexible = "AZURE_FLEXIBLE" - execEnvGoogle = "GOOGLE" + + dbSizeCachingInterval = 30 * time.Minute + dbMetricJoinStr = "¤¤¤" // just some unlikely string for a DB name to avoid using maps of maps for DB+metric data + execEnvUnknown = "UNKNOWN" + execEnvAzureSingle = "AZURE_SINGLE" + execEnvAzureFlexible = "AZURE_FLEXIBLE" + execEnvGoogle = "GOOGLE" ) var dbTypeMap = map[string]bool{config.DbTypePg: true, config.DbTypePgCont: true, config.DbTypeBouncer: true, config.DbTypePatroni: true, config.DbTypePatroniCont: true, config.DbTypePgPOOL: true, config.DbTypePatroniNamespaceDiscovery: true} @@ -334,42 +331,33 @@ func GetMonitoredDatabasesFromConfigDB() ([]MonitoredDatabase, error) { metricConfig, err := jsonTextToMap(row["md_config"].(string)) if err != nil { - logger.Warningf("Cannot parse metrics JSON config for \"%s\": %v", row["md_unique_name"].(string), err) + logger.Warningf("Cannot parse metrics JSON config for \"%s\": %v", row["md_name"].(string), err) continue } metricConfigStandby := make(map[string]float64) if configStandby, ok := row["md_config_standby"]; ok { metricConfigStandby, err = jsonTextToMap(configStandby.(string)) if err != nil { - logger.Warningf("Cannot parse standby metrics JSON config for \"%s\". Ignoring standby config: %v", row["md_unique_name"].(string), err) + logger.Warningf("Cannot parse standby metrics JSON config for \"%s\". Ignoring standby config: %v", row["md_name"].(string), err) } } customTags, err := jsonTextToStringMap(row["md_custom_tags"].(string)) if err != nil { - logger.Warningf("Cannot parse custom tags JSON for \"%s\". Ignoring custom tags. Error: %v", row["md_unique_name"].(string), err) + logger.Warningf("Cannot parse custom tags JSON for \"%s\". Ignoring custom tags. 
Error: %v", row["md_name"].(string), err) customTags = nil } hostConfigAttrs := HostConfigAttrs{} err = yaml.Unmarshal([]byte(row["md_host_config"].(string)), &hostConfigAttrs) if err != nil { - logger.Warningf("Cannot parse host config JSON for \"%s\". Ignoring host config. Error: %v", row["md_unique_name"].(string), err) + logger.Warningf("Cannot parse host config JSON for \"%s\". Ignoring host config. Error: %v", row["md_name"].(string), err) } md := MonitoredDatabase{ - DBUniqueName: row["md_unique_name"].(string), - DBUniqueNameOrig: row["md_unique_name"].(string), - Host: row["md_hostname"].(string), - Port: row["md_port"].(string), - DBName: row["md_dbname"].(string), - User: row["md_user"].(string), + DBUniqueName: row["md_name"].(string), + DBUniqueNameOrig: row["md_name"].(string), IsSuperuser: row["md_is_superuser"].(bool), - Password: row["md_password"].(string), - PasswordType: row["md_password_type"].(string), - SslMode: row["md_sslmode"].(string), - SslRootCAPath: row["md_root_ca_path"].(string), - SslClientCertPath: row["md_client_cert_path"].(string), - SslClientKeyPath: row["md_client_key_path"].(string), - StmtTimeout: int64(row["md_statement_timeout_seconds"].(int32)), + ConnStr: row["md_connstr"].(string), + Encryption: row["md_encryption"].(string), Metrics: metricConfig, MetricsStandby: metricConfigStandby, DBType: row["md_dbtype"].(string), @@ -385,15 +373,15 @@ func GetMonitoredDatabasesFromConfigDB() ([]MonitoredDatabase, error) { continue } - if md.PasswordType == "aes-gcm-256" && opts.AesGcmKeyphrase != "" { - md.Password = decrypt(md.DBUniqueName, opts.AesGcmKeyphrase, md.Password) + if md.Encryption == "aes-gcm-256" && opts.AesGcmKeyphrase != "" { + md.ConnStr = decrypt(md.DBUniqueName, opts.AesGcmKeyphrase, md.ConnStr) } if md.DBType == config.DbTypePgCont { resolved, err := ResolveDatabasesFromConfigEntry(md) if err != nil { logger.Errorf("Failed to resolve DBs for \"%s\": %s", md.DBUniqueName, err) - if md.PasswordType == 
"aes-gcm-256" && opts.AesGcmKeyphrase == "" { + if md.Encryption == "aes-gcm-256" && opts.AesGcmKeyphrase == "" { logger.Errorf("No decryption key set. Use the --aes-gcm-keyphrase or --aes-gcm-keyphrase params to set") } continue @@ -401,7 +389,7 @@ func GetMonitoredDatabasesFromConfigDB() ([]MonitoredDatabase, error) { tempArr := make([]string, 0) for _, rdb := range resolved { monitoredDBs = append(monitoredDBs, rdb) - tempArr = append(tempArr, rdb.DBName) + tempArr = append(tempArr, rdb.ConnStr) } logger.Debugf("Resolved %d DBs with prefix \"%s\": [%s]", len(resolved), md.DBUniqueName, strings.Join(tempArr, ", ")) } else if md.DBType == config.DbTypePatroni || md.DBType == config.DbTypePatroniCont || md.DBType == config.DbTypePatroniNamespaceDiscovery { @@ -413,7 +401,7 @@ func GetMonitoredDatabasesFromConfigDB() ([]MonitoredDatabase, error) { tempArr := make([]string, 0) for _, rdb := range resolved { monitoredDBs = append(monitoredDBs, rdb) - tempArr = append(tempArr, rdb.DBName) + tempArr = append(tempArr, rdb.ConnStr) } logger.Debugf("Resolved %d DBs with prefix \"%s\": [%s]", len(resolved), md.DBUniqueName, strings.Join(tempArr, ", ")) } else { @@ -598,7 +586,7 @@ func GetRecommendations(dbUnique string, vme DBVersionMapEntry) (metrics.MetricD logger.Debugf("Processing %d recommendation metrics for \"%s\"", len(recoMetrics), dbUnique) for m, mvp := range recoMetrics { - data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, mvp.MetricAttrs.StatementTimeoutSeconds, mvp.SQL) + data, err := DBExecReadByDbUniqueName(mainContext, dbUnique, mvp.SQL) if err != nil { if strings.Contains(err.Error(), "does not exist") { // some more exotic extensions missing is expected, don't pollute the error log logger.Infof("[%s:%s] Could not execute recommendations SQL: %v", dbUnique, m, err) @@ -750,7 +738,7 @@ retry_with_superuser_sql: // if 1st fetch with normal SQL fails, try with SU SQL return nil, err } } else { - data, err = DBExecReadByDbUniqueName(mainContext, 
msg.DBUniqueName, mvp.MetricAttrs.StatementTimeoutSeconds, sql) + data, err = DBExecReadByDbUniqueName(mainContext, msg.DBUniqueName, sql) if err != nil { // let's soften errors to "info" from functions that expect the server to be a primary to reduce noise @@ -796,7 +784,7 @@ retry_with_superuser_sql: // if 1st fetch with normal SQL fails, try with SU SQL logger.WithFields(map[string]any{"database": msg.DBUniqueName, "metric": msg.MetricName, "rows": len(data)}).Info("measurements fetched") if regexIsPgbouncerMetrics.MatchString(msg.MetricName) { // clean unwanted pgbouncer pool stats here as not possible in SQL - data = FilterPgbouncerData(data, md.DBName, vme) + data = FilterPgbouncerData(data, md.GetDatabaseName(), vme) } ClearDBUnreachableStateIfAny(msg.DBUniqueName) @@ -828,11 +816,11 @@ send_to_storageChannel: } logger.Infof("[%s:%s] loaded %d rows from the instance cache", msg.DBUniqueName, msg.MetricName, len(cachedData)) atomic.AddUint64(&totalMetricsReusedFromCacheCounter, uint64(len(cachedData))) - return []metrics.MetricStoreMessage{{DBUniqueName: msg.DBUniqueName, MetricName: msg.MetricName, Data: cachedData, CustomTags: md.CustomTags, + return []metrics.MetricStoreMessage{{DBName: msg.DBUniqueName, MetricName: msg.MetricName, Data: cachedData, CustomTags: md.CustomTags, MetricDefinitionDetails: mvp, RealDbname: vme.RealDbname, SystemIdentifier: vme.SystemIdentifier}}, nil } atomic.AddUint64(&totalMetricsFetchedCounter, uint64(len(data))) - return []metrics.MetricStoreMessage{{DBUniqueName: msg.DBUniqueName, MetricName: msg.MetricName, Data: data, CustomTags: md.CustomTags, + return []metrics.MetricStoreMessage{{DBName: msg.DBUniqueName, MetricName: msg.MetricName, Data: data, CustomTags: md.CustomTags, MetricDefinitionDetails: mvp, RealDbname: vme.RealDbname, SystemIdentifier: vme.SystemIdentifier}}, nil } @@ -1051,7 +1039,7 @@ func MetricGathererLoop(ctx context.Context, dbUniqueName, dbUniqueNameOrig, dbT entry := metrics.MetricEntry{"details": 
message, "epoch_ns": (metricStoreMessages[0].Data)[0]["epoch_ns"]} detectedChangesSummary = append(detectedChangesSummary, entry) metricStoreMessages = append(metricStoreMessages, - metrics.MetricStoreMessage{DBUniqueName: dbUniqueName, DBType: dbType, + metrics.MetricStoreMessage{DBName: dbUniqueName, DBType: dbType, MetricName: "object_changes", Data: detectedChangesSummary, CustomTags: metricStoreMessages[0].CustomTags}) } } @@ -1116,7 +1104,7 @@ func DatarowsToMetricstoreMessage(data metrics.MetricData, msg MetricFetchMessag atomic.AddUint64(&totalMetricsFetchedCounter, uint64(len(data))) return metrics.MetricStoreMessage{ - DBUniqueName: msg.DBUniqueName, + DBName: msg.DBUniqueName, DBType: msg.DBType, MetricName: msg.MetricName, CustomTags: md.CustomTags, @@ -1372,20 +1360,8 @@ func ReadPresetMetricsConfigFromFolder(folder string, _ bool) (map[string]map[st func ExpandEnvVarsForConfigEntryIfStartsWithDollar(md MonitoredDatabase) (MonitoredDatabase, int) { var changed int - if strings.HasPrefix(md.DBName, "$") { - md.DBName = os.ExpandEnv(md.DBName) - changed++ - } - if strings.HasPrefix(md.User, "$") { - md.User = os.ExpandEnv(md.User) - changed++ - } - if strings.HasPrefix(md.Password, "$") { - md.Password = os.ExpandEnv(md.Password) - changed++ - } - if strings.HasPrefix(md.PasswordType, "$") { - md.PasswordType = os.ExpandEnv(md.PasswordType) + if strings.HasPrefix(md.Encryption, "$") { + md.Encryption = os.ExpandEnv(md.Encryption) changed++ } if strings.HasPrefix(md.DBType, "$") { @@ -1396,10 +1372,6 @@ func ExpandEnvVarsForConfigEntryIfStartsWithDollar(md MonitoredDatabase) (Monito md.DBUniqueName = os.ExpandEnv(md.DBUniqueName) changed++ } - if strings.HasPrefix(md.SslMode, "$") { - md.SslMode = os.ExpandEnv(md.SslMode) - changed++ - } if strings.HasPrefix(md.DBNameIncludePattern, "$") { md.DBNameIncludePattern = os.ExpandEnv(md.DBNameIncludePattern) changed++ @@ -1438,9 +1410,6 @@ func ConfigFileToMonitoredDatabases(configFilePath string) 
([]MonitoredDatabase, return hostList, err } for _, v := range c { - if v.Port == "" { - v.Port = "5432" - } if v.DBType == "" { v.DBType = config.DbTypePg } @@ -1449,9 +1418,6 @@ func ConfigFileToMonitoredDatabases(configFilePath string) ([]MonitoredDatabase, if v.Group == "" { v.Group = "default" } - if v.StmtTimeout == 0 { - v.StmtTimeout = 5 - } vExp, changed := ExpandEnvVarsForConfigEntryIfStartsWithDollar(v) if changed > 0 { logger.Debugf("[%s] %d config attributes expanded from ENV", vExp.DBUniqueName, changed) @@ -1522,20 +1488,20 @@ func GetMonitoredDatabasesFromMonitoringConfig(mc []MonitoredDatabase) []Monitor logger.Warningf("Ignoring host \"%s\" - unknown dbtype: %s. Expected one of: %+v", e.DBUniqueName, e.DBType, dbTypes) continue } - if e.IsEnabled && e.PasswordType == "aes-gcm-256" && opts.AesGcmKeyphrase != "" { - e.Password = decrypt(e.DBUniqueName, opts.AesGcmKeyphrase, e.Password) + if e.IsEnabled && e.Encryption == "aes-gcm-256" && opts.AesGcmKeyphrase != "" { + e.ConnStr = decrypt(e.DBUniqueName, opts.AesGcmKeyphrase, e.ConnStr) } - if e.DBType == config.DbTypePatroni && e.DBName == "" { + if e.DBType == config.DbTypePatroni && e.GetDatabaseName() == "" { logger.Warningf("Ignoring host \"%s\" as \"dbname\" attribute not specified but required by dbtype=patroni", e.DBUniqueName) continue } - if e.DBType == config.DbTypePg && e.DBName == "" { + if e.DBType == config.DbTypePg && e.GetDatabaseName() == "" { logger.Warningf("Ignoring host \"%s\" as \"dbname\" attribute not specified but required by dbtype=postgres", e.DBUniqueName) continue } - if len(e.DBName) == 0 || e.DBType == config.DbTypePgCont || e.DBType == config.DbTypePatroni || e.DBType == config.DbTypePatroniCont || e.DBType == config.DbTypePatroniNamespaceDiscovery { + if len(e.GetDatabaseName()) == 0 || e.DBType == config.DbTypePgCont || e.DBType == config.DbTypePatroni || e.DBType == config.DbTypePatroniCont || e.DBType == config.DbTypePatroniNamespaceDiscovery { if e.DBType == 
config.DbTypePgCont { - logger.Debugf("Adding \"%s\" (host=%s, port=%s) to continuous monitoring ...", e.DBUniqueName, e.Host, e.Port) + logger.Debugf("Adding \"%s\" (connstr=%s) to continuous monitoring ...", e.DBUniqueName, e.ConnStr) } var foundDbs []MonitoredDatabase var err error @@ -1552,7 +1518,7 @@ func GetMonitoredDatabasesFromMonitoringConfig(mc []MonitoredDatabase) []Monitor tempArr := make([]string, 0) for _, r := range foundDbs { md = append(md, r) - tempArr = append(tempArr, r.DBName) + tempArr = append(tempArr, r.GetDatabaseName()) } logger.Debugf("Resolved %d DBs with prefix \"%s\": [%s]", len(foundDbs), e.DBUniqueName, strings.Join(tempArr, ", ")) } else { @@ -1712,9 +1678,9 @@ func SyncMonitoredDBsToDatastore(ctx context.Context, monitoredDbs []MonitoredDa db["tag_"+k] = v } msms = append(msms, metrics.MetricStoreMessage{ - DBUniqueName: mdb.DBUniqueName, - MetricName: monitoredDbsDatastoreSyncMetricName, - Data: metrics.MetricData{db}, + DBName: mdb.DBUniqueName, + MetricName: monitoredDbsDatastoreSyncMetricName, + Data: metrics.MetricData{db}, }) } select { @@ -1726,13 +1692,6 @@ func SyncMonitoredDBsToDatastore(ctx context.Context, monitoredDbs []MonitoredDa } } -func CheckFolderExistsAndReadable(path string) bool { - if _, err := os.ReadDir(path); err != nil { - return false - } - return true -} - func shouldDbBeMonitoredBasedOnCurrentState(md MonitoredDatabase) bool { return !IsDBDormant(md.DBUniqueName) } @@ -1915,68 +1874,22 @@ func main() { } // running in config file based mode? 
- if len(opts.Config) > 0 { - if opts.Metric.MetricsFolder == "" && CheckFolderExistsAndReadable(defaultMetricsDefinitionPathPkg) { - opts.Metric.MetricsFolder = defaultMetricsDefinitionPathPkg - logger.Warningf("--metrics-folder path not specified, using %s", opts.Metric.MetricsFolder) - } else if opts.Metric.MetricsFolder == "" && CheckFolderExistsAndReadable(defaultMetricsDefinitionPathDocker) { - opts.Metric.MetricsFolder = defaultMetricsDefinitionPathDocker - logger.Warningf("--metrics-folder path not specified, using %s", opts.Metric.MetricsFolder) - } else { - if !CheckFolderExistsAndReadable(opts.Metric.MetricsFolder) { - logger.Fatalf("Could not read --metrics-folder path %s", opts.Metric.MetricsFolder) - } - } - - if !opts.IsAdHocMode() { - fi, err := os.Stat(opts.Config) - if err != nil { - logger.Fatalf("Could not Stat() path %s: %s", opts.Config, err) - } - switch mode := fi.Mode(); { - case mode.IsDir(): - _, err := os.ReadDir(opts.Config) - if err != nil { - logger.Fatalf("Could not read path %s: %s", opts.Config, err) - } - case mode.IsRegular(): - _, err := os.ReadFile(opts.Config) - if err != nil { - logger.Fatalf("Could not read path %s: %s", opts.Config, err) - } - } - } - - fileBasedMetrics = true - } else if opts.IsAdHocMode() && opts.Metric.MetricsFolder != "" && CheckFolderExistsAndReadable(opts.Metric.MetricsFolder) { - // don't need the Config DB connection actually for ad-hoc mode if metric definitions are there - fileBasedMetrics = true - } else if opts.IsAdHocMode() && opts.Metric.MetricsFolder == "" && (CheckFolderExistsAndReadable(defaultMetricsDefinitionPathPkg) || CheckFolderExistsAndReadable(defaultMetricsDefinitionPathDocker)) { - if CheckFolderExistsAndReadable(defaultMetricsDefinitionPathPkg) { - opts.Metric.MetricsFolder = defaultMetricsDefinitionPathPkg - } else if CheckFolderExistsAndReadable(defaultMetricsDefinitionPathDocker) { - opts.Metric.MetricsFolder = defaultMetricsDefinitionPathDocker - } - 
logger.Warningf("--metrics-folder path not specified, using %s", opts.Metric.MetricsFolder) + configKind, err := opts.GetConfigKind() + switch { + case err != nil: + logger.Fatal(err) + case configKind == config.ConfigFile || configKind == config.ConfigFolder: fileBasedMetrics = true - } else { // normal "Config DB" mode - // make sure all PG params are there - if opts.Connection.User == "" { - opts.Connection.User = os.Getenv("USER") - } - if opts.Connection.Host == "" || opts.Connection.Port == "" || opts.Connection.Dbname == "" || opts.Connection.User == "" { - fmt.Println("Check config DB parameters") - return - } - - configDb, err = db.InitAndTestConfigStoreConnection(mainContext, opts.Connection.Host, - opts.Connection.Port, opts.Connection.Dbname, opts.Connection.User, opts.Connection.Password, - opts.Connection.PgRequireSSL) - if err != nil { + case configKind == config.ConfigPgURL: + if configDb, err = db.InitAndTestConfigStoreConnection(mainContext, opts.Connection.Config); err != nil { logger.WithError(err).Fatal("Could not connect to configuration database") } } + if opts.Connection.Init { + return + } + pgBouncerNumericCountersStartVersion = VersionToInt("1.12") if !opts.Ping { @@ -1995,8 +1908,7 @@ func main() { go MetricsBatcher(mainContext, opts.BatchingDelayMs, bufferedPersistCh, persistCh) } - metricsWriter, err = sinks.NewMultiWriter(mainContext, opts) - if err != nil { + if metricsWriter, err = sinks.NewMultiWriter(mainContext, opts); err != nil { logger.Fatal(err) } go metricsWriter.WriteMetrics(mainContext, persistCh) @@ -2004,9 +1916,6 @@ func main() { _, _ = daemon.SdNotify(false, "READY=1") // Notify systemd, does nothing outside of systemd } - if opts.Connection.Init { - return - } firstLoop := true mainLoopCount := 0 var monitoredDbs []MonitoredDatabase @@ -2060,7 +1969,7 @@ func main() { logger.Fatalf("Could not parse --adhoc-config(%s): %v", opts.AdHocConfig, err) } } - md := MonitoredDatabase{DBUniqueName: opts.AdHocUniqueName, DBType: 
opts.AdHocDBType, Metrics: adhocconfig, LibPQConnStr: opts.AdHocConnString} + md := MonitoredDatabase{DBUniqueName: opts.AdHocUniqueName, DBType: opts.AdHocDBType, Metrics: adhocconfig, ConnStr: opts.AdHocConnString} if opts.AdHocDBType == config.DbTypePg { monitoredDbs = []MonitoredDatabase{md} } else { @@ -2076,7 +1985,7 @@ func main() { } } } else { - mc, err := ReadMonitoringConfigFromFileOrFolder(opts.Config) + mc, err := ReadMonitoringConfigFromFileOrFolder(opts.Connection.Config) if err == nil { logger.Debugf("Found %d monitoring config entries", len(mc)) if len(opts.Metric.Group) > 0 { @@ -2088,9 +1997,9 @@ func main() { logger.Debugf("Found %d databases to monitor from %d config items...", len(monitoredDbs), len(mc)) } else { if firstLoop { - logger.Fatalf("Could not read/parse monitoring config from path: %s. err: %v", opts.Config, err) + logger.Fatalf("Could not read/parse monitoring config from path: %s. err: %v", opts.Connection.Config, err) } else { - logger.Errorf("Could not read/parse monitoring config from path: %s. using last valid config data. err: %v", opts.Config, err) + logger.Errorf("Could not read/parse monitoring config from path: %s. using last valid config data. err: %v", opts.Connection.Config, err) } time.Sleep(time.Second * time.Duration(opts.Connection.ServersRefreshLoopSeconds)) continue @@ -2151,7 +2060,7 @@ func main() { metricConfig = host.Metrics wasInstancePreviouslyDormant := IsDBDormant(dbUnique) - if host.PasswordType == "aes-gcm-256" && len(opts.AesGcmKeyphrase) == 0 && len(opts.AesGcmKeyphraseFile) == 0 { + if host.Encryption == "aes-gcm-256" && len(opts.AesGcmKeyphrase) == 0 && len(opts.AesGcmKeyphraseFile) == 0 { // Warn if any encrypted hosts found but no keyphrase given logger.Warningf("Encrypted password type found for host \"%s\", but no decryption keyphrase specified. 
Use --aes-gcm-keyphrase or --aes-gcm-keyphrase-file params", dbUnique) } diff --git a/src/metrics/sinks/json_file.go b/src/metrics/sinks/json_file.go index 455d488880..aa7b3065de 100644 --- a/src/metrics/sinks/json_file.go +++ b/src/metrics/sinks/json_file.go @@ -53,7 +53,7 @@ func (jw *JSONWriter) Write(msgs []metrics.MetricStoreMessage) error { dataRow := map[string]any{ "metric": msg.MetricName, "data": msg.Data, - "dbname": msg.DBUniqueName, + "dbname": msg.DBName, "custom_tags": msg.CustomTags, } if jw.RealDbnameField != "" && msg.RealDbname != "" { diff --git a/src/metrics/sinks/postgres.go b/src/metrics/sinks/postgres.go index c3a73f06f4..ce20803248 100644 --- a/src/metrics/sinks/postgres.go +++ b/src/metrics/sinks/postgres.go @@ -156,7 +156,7 @@ func (pgw *PostgresWriter) Write(msgs []metrics.MetricStoreMessage) error { if !ok { metricsToStorePerMetric[metricNameTemp] = make([]metrics.MetricStoreMessagePostgres, 0) } - metricsArr = append(metricsArr, metrics.MetricStoreMessagePostgres{Time: epochTime, DBName: msg.DBUniqueName, + metricsArr = append(metricsArr, metrics.MetricStoreMessagePostgres{Time: epochTime, DBName: msg.DBName, Metric: msg.MetricName, Data: fields, TagData: tags}) metricsToStorePerMetric[metricNameTemp] = metricsArr @@ -178,14 +178,14 @@ func (pgw *PostgresWriter) Write(msgs []metrics.MetricStoreMessage) error { if !ok { pgPartBoundsDbName[msg.MetricName] = make(map[string]ExistingPartitionInfo) } - bounds, ok := pgPartBoundsDbName[msg.MetricName][msg.DBUniqueName] + bounds, ok := pgPartBoundsDbName[msg.MetricName][msg.DBName] if !ok || (ok && epochTime.Before(bounds.StartTime)) { bounds.StartTime = epochTime - pgPartBoundsDbName[msg.MetricName][msg.DBUniqueName] = bounds + pgPartBoundsDbName[msg.MetricName][msg.DBName] = bounds } if !ok || (ok && epochTime.After(bounds.EndTime)) { bounds.EndTime = epochTime - pgPartBoundsDbName[msg.MetricName][msg.DBUniqueName] = bounds + pgPartBoundsDbName[msg.MetricName][msg.DBName] = bounds } } } @@ 
-259,7 +259,7 @@ func (pgw *PostgresWriter) Write(msgs []metrics.MetricStoreMessage) error { if err == nil { if len(msgs) == 1 { logger.Infof("wrote %d/%d rows to Postgres for [%s:%s] in %.1f ms", rowsBatched, totalRows, - msgs[0].DBUniqueName, msgs[0].MetricName, float64(diff.Nanoseconds())/1000000) + msgs[0].DBName, msgs[0].MetricName, float64(diff.Nanoseconds())/1000000) } else { logger.Infof("wrote %d/%d rows from %d metric sets to Postgres in %.1f ms", rowsBatched, totalRows, len(msgs), float64(diff.Nanoseconds())/1000000) diff --git a/src/metrics/sinks/prometheus.go b/src/metrics/sinks/prometheus.go index b31cab1234..f160e5cc61 100644 --- a/src/metrics/sinks/prometheus.go +++ b/src/metrics/sinks/prometheus.go @@ -74,7 +74,7 @@ func (promw *PrometheusWriter) Write(msgs []metrics.MetricStoreMessage) error { return nil } msg := msgs[0] - promw.PromAsyncCacheAddMetricData(msg.DBUniqueName, msg.MetricName, msgs) + promw.PromAsyncCacheAddMetricData(msg.DBName, msg.MetricName, msgs) return nil } @@ -174,7 +174,7 @@ func (promw *PrometheusWriter) setInstanceUpDownState(ch chan<- prometheus.Metri data[epochColumnName] = time.Now().UnixNano() pm := promw.MetricStoreMessageToPromMetrics(metrics.MetricStoreMessage{ - DBUniqueName: dbName, + DBName: dbName, DBType: "postgres", //md.DBType, MetricName: promInstanceUpStateMetric, CustomTags: nil, //md.CustomTags, @@ -211,8 +211,8 @@ func (promw *PrometheusWriter) MetricStoreMessageToPromMetrics(msg metrics.Metri epochTime = time.Unix(0, epochNs) if epochTime.Before(epochNow.Add(-1 * promScrapingStalenessHardDropLimit)) { - logger.Warningf("[%s][%s] Dropping metric set due to staleness (>%v) ...", msg.DBUniqueName, msg.MetricName, promScrapingStalenessHardDropLimit) - promw.PurgeMetricsFromPromAsyncCacheIfAny(msg.DBUniqueName, msg.MetricName) + logger.Warningf("[%s][%s] Dropping metric set due to staleness (>%v) ...", msg.DBName, msg.MetricName, promScrapingStalenessHardDropLimit) + 
promw.PurgeMetricsFromPromAsyncCacheIfAny(msg.DBName, msg.MetricName) return promMetrics } } @@ -220,7 +220,7 @@ func (promw *PrometheusWriter) MetricStoreMessageToPromMetrics(msg metrics.Metri for _, dr := range msg.Data { labels := make(map[string]string) fields := make(map[string]float64) - labels["dbname"] = msg.DBUniqueName + labels["dbname"] = msg.DBName for k, v := range dr { if v == nil || v == "" || k == epochColumnName { @@ -235,7 +235,7 @@ func (promw *PrometheusWriter) MetricStoreMessageToPromMetrics(msg metrics.Metri if dataType == "float64" || dataType == "float32" || dataType == "int64" || dataType == "int32" || dataType == "int" { f, err := strconv.ParseFloat(fmt.Sprintf("%v", v), 64) if err != nil { - logger.Debugf("Skipping scraping column %s of [%s:%s]: %v", k, msg.DBUniqueName, msg.MetricName, err) + logger.Debugf("Skipping scraping column %s of [%s:%s]: %v", k, msg.DBName, msg.MetricName, err) } fields[k] = f } else if dataType == "bool" { @@ -245,7 +245,7 @@ func (promw *PrometheusWriter) MetricStoreMessageToPromMetrics(msg metrics.Metri fields[k] = 0 } } else { - logger.Debugf("Skipping scraping column %s of [%s:%s], unsupported datatype: %s", k, msg.DBUniqueName, msg.MetricName, dataType) + logger.Debugf("Skipping scraping column %s of [%s:%s], unsupported datatype: %s", k, msg.DBName, msg.MetricName, dataType) continue } } diff --git a/src/metrics/types.go b/src/metrics/types.go index 6097ab3b32..1aefe9e329 100644 --- a/src/metrics/types.go +++ b/src/metrics/types.go @@ -44,7 +44,7 @@ type MetricEntry map[string]any type MetricData []map[string]any type MetricStoreMessage struct { - DBUniqueName string + DBName string DBType string MetricName string CustomTags map[string]string diff --git a/src/patroni.go b/src/patroni.go index 83d210ee3f..907c607d98 100644 --- a/src/patroni.go +++ b/src/patroni.go @@ -6,14 +6,17 @@ import ( "crypto/x509" "errors" "fmt" + "net/url" "os" "path" "regexp" + "strconv" "time" 
"github.com/cybertec-postgresql/pgwatch3/config" "github.com/cybertec-postgresql/pgwatch3/db" consul_api "github.com/hashicorp/consul/api" + "github.com/jackc/pgx/v5" "github.com/samuel/go-zookeeper/zk" client "go.etcd.io/etcd/client/v3" ) @@ -147,7 +150,7 @@ func getEtcdClusterMembers(database MonitoredDatabase) ([]PatroniClusterMember, kapi := c.KV if database.DBType == config.DbTypePatroniNamespaceDiscovery { // all scopes, all DBs (regex filtering applies if defined) - if len(database.DBName) > 0 { + if len(database.GetDatabaseName()) > 0 { return ret, fmt.Errorf("Skipping Patroni entry %s - cannot specify a DB name when monitoring all scopes (regex patterns are supported though)", database.DBUniqueName) } if database.HostConfig.Namespace == "" { @@ -303,31 +306,29 @@ func ResolveDatabasesFromPatroni(ce MonitoredDatabase) ([]MonitoredDatabase, err } else { dbUnique = ce.DBUniqueName + "_" + m.Name } - if ce.DBName != "" { + if ce.GetDatabaseName() != "" { md = append(md, MonitoredDatabase{ - DBUniqueName: dbUnique, - DBUniqueNameOrig: ce.DBUniqueName, - DBName: ce.DBName, - Host: host, - Port: port, - User: ce.User, - Password: ce.Password, - PasswordType: ce.PasswordType, - SslMode: ce.SslMode, - SslRootCAPath: ce.SslRootCAPath, - SslClientCertPath: ce.SslClientCertPath, - SslClientKeyPath: ce.SslClientKeyPath, - StmtTimeout: ce.StmtTimeout, - Metrics: ce.Metrics, - PresetMetrics: ce.PresetMetrics, - IsSuperuser: ce.IsSuperuser, - CustomTags: ce.CustomTags, - HostConfig: ce.HostConfig, - DBType: "postgres"}) + DBUniqueName: dbUnique, + DBUniqueNameOrig: ce.DBUniqueName, + ConnStr: ce.ConnStr, + Encryption: ce.Encryption, + Metrics: ce.Metrics, + PresetMetrics: ce.PresetMetrics, + IsSuperuser: ce.IsSuperuser, + CustomTags: ce.CustomTags, + HostConfig: ce.HostConfig, + DBType: "postgres"}) continue } - c, err := db.GetPostgresDBConnection(mainContext, "", host, port, "template1", ce.User, ce.Password, - ce.SslMode, ce.SslRootCAPath, ce.SslClientCertPath, 
ce.SslClientKeyPath) + c, err := db.GetPostgresDBConnection(mainContext, ce.ConnStr, + func(c *pgx.ConnConfig) error { + c.Host = host + c.Database = "template1" + i, err := strconv.Atoi(port) + c.Port = uint16(i) + md[len(md)].ConnStr = c.ConnString() + return err + }) if err != nil { logger.Errorf("Could not contact Patroni member [%s:%s]: %v", ce.DBUniqueName, m.Scope, err) continue @@ -349,26 +350,23 @@ func ResolveDatabasesFromPatroni(ce MonitoredDatabase) ([]MonitoredDatabase, err } for _, d := range data { + connURL, err := url.Parse(ce.ConnStr) + if err != nil { + continue + } + connURL.Host = host + ":" + port + connURL.Path = d["datname"].(string) md = append(md, MonitoredDatabase{ - DBUniqueName: dbUnique + "_" + d["datname_escaped"].(string), - DBUniqueNameOrig: dbUnique, - DBName: d["datname"].(string), - Host: host, - Port: port, - User: ce.User, - Password: ce.Password, - PasswordType: ce.PasswordType, - SslMode: ce.SslMode, - SslRootCAPath: ce.SslRootCAPath, - SslClientCertPath: ce.SslClientCertPath, - SslClientKeyPath: ce.SslClientKeyPath, - StmtTimeout: ce.StmtTimeout, - Metrics: ce.Metrics, - PresetMetrics: ce.PresetMetrics, - IsSuperuser: ce.IsSuperuser, - CustomTags: ce.CustomTags, - HostConfig: ce.HostConfig, - DBType: "postgres"}) + DBUniqueName: dbUnique + "_" + d["datname_escaped"].(string), + DBUniqueNameOrig: dbUnique, + ConnStr: connURL.String(), + Encryption: ce.Encryption, + Metrics: ce.Metrics, + PresetMetrics: ce.PresetMetrics, + IsSuperuser: ce.IsSuperuser, + CustomTags: ce.CustomTags, + HostConfig: ce.HostConfig, + DBType: "postgres"}) } } diff --git a/src/webui/src/layout/Dashboard/DbsTable/ModalComponent.tsx b/src/webui/src/layout/Dashboard/DbsTable/ModalComponent.tsx index ad42562ce3..cb8b65fbce 100644 --- a/src/webui/src/layout/Dashboard/DbsTable/ModalComponent.tsx +++ b/src/webui/src/layout/Dashboard/DbsTable/ModalComponent.tsx @@ -1,8 +1,6 @@ import { Dispatch, SetStateAction, useEffect, useState } from "react"; import 
CloseIcon from "@mui/icons-material/Close"; import DoneIcon from "@mui/icons-material/Done"; -import Visibility from '@mui/icons-material/Visibility'; -import VisibilityOff from '@mui/icons-material/VisibilityOff'; import { Box, Button, @@ -13,8 +11,6 @@ import { DialogContent, DialogTitle, FormControlLabel, - IconButton, - InputAdornment, Stack, TextField, } from "@mui/material"; @@ -22,13 +18,12 @@ import ToggleButton from "@mui/material/ToggleButton"; import ToggleButtonGroup from "@mui/material/ToggleButtonGroup"; import { Controller, FieldPath, FormProvider, SubmitHandler, useForm, useFormContext } from "react-hook-form"; import { useAddDb, useEditDb, useTestConnection } from "queries/Dashboard"; -import { Db, TestConnection, createDbForm } from "queries/types/DbTypes"; +import { Db, createDbForm } from "queries/types/DbTypes"; import { AutocompleteComponent, - AutocompleteConfigComponent, - AutocompleteSslModeComponent + AutocompleteConfigComponent } from "./SelectComponents"; -import { dbTypeOptions, passwordEncryptionOptions, presetConfigsOptions, sslModeOptions } from "./SelectComponentsOptions"; +import { dbTypeOptions, passwordEncryptionOptions, presetConfigsOptions } from "./SelectComponentsOptions"; import { MultilineTextField, SimpleTextField } from "./TextFieldComponents"; type Props = { @@ -53,7 +48,7 @@ export const ModalComponent = ({ open, setOpen, recordData, action }: Props) => break; case "DUPLICATE": reset(); - Object.entries(recordData!).map(([key, value]) => setValue(key as FieldPath, key === "md_unique_name" ? "" : convertValue(value))); + Object.entries(recordData!).map(([key, value]) => setValue(key as FieldPath, key === "md_name" ? 
"" : convertValue(value))); break; } }, [action, recordData, reset, setValue]); @@ -75,7 +70,7 @@ export const ModalComponent = ({ open, setOpen, recordData, action }: Props) => const onSubmit: SubmitHandler = (result) => { if (action === "EDIT") { editDb.mutate({ - md_unique_name: recordData!.md_unique_name, + md_unique_name: recordData!.md_name, data: result }); } else { @@ -109,7 +104,6 @@ export const ModalComponent = ({ open, setOpen, recordData, action }: Props) => enum Steps { main = "Main", connection = "Connection", - ssl = "SSL", presets = "Presets" } @@ -117,9 +111,8 @@ type StepType = keyof typeof Steps; const defaultStep = Object.keys(Steps)[0] as StepType; const formErrors = { - main: ["md_unique_name", "md_group", "md_dbtype"], - connection: ["md_hostname", "md_port", "md_user", "md_statement_timeout_seconds", "md_dbname", "md_password_type"], - ssl: ["md_sslmode"], + main: ["md_name", "md_group", "md_dbtype"], + connection: ["md_connstr", "md_encryption"], presets: [] }; @@ -129,40 +122,15 @@ const getStepError = (step: StepType, errors: string[]): boolean => { }; const ModalContent = () => { - const { control, formState: { errors }, getValues, setValue, watch } = useFormContext(); + const { control, formState: { errors }, getValues, setValue } = useFormContext(); const [activeStep, setActiveStep] = useState(defaultStep); - const [showPassword, setShowPassword] = useState(false); const testConnection = useTestConnection(); - const handleClickShowPassword = () => setShowPassword((show: boolean) => !show); - const handleValidate = (val: string) => !!val.toString().trim(); const handleTestConnection = () => { - const data: TestConnection = { - host: getValues("md_hostname"), - port: getValues("md_port"), - dbname: getValues("md_dbname"), - user: getValues("md_user"), - password: getValues("md_password"), - connect_timeout: getValues("connection_timeout"), - sslmode: getValues("md_sslmode"), - sslcert: getValues("md_client_cert_path"), - sslkey: 
getValues("md_client_key_path") - }; - testConnection.mutate(data); - }; - - const mdSslmodeVal = watch("md_sslmode"); - const isSslDisable = !mdSslmodeVal || mdSslmodeVal === "disable"; - - const handleSslChange = (nextValue?: string) => { - if (nextValue === "disable") { - for (const inputName of ["md_root_ca_path", "md_client_cert_path", "md_client_key_path"]) { - setValue(inputName, ""); - } - } + testConnection.mutate(getValues("md_connstr")); }; const copyPresetConfig = (name: FieldPath, value: string) => { @@ -178,7 +146,7 @@ const ModalContent = () => { { ), connection: ( - - ( - - )} - /> - ( - - )} - /> - - - ( - - )} - /> - ( - - )} - /> - - - ( - - )} - /> - ( - - )} - /> - - - ( - - )} - /> - ( - - - {showPassword ? : } - - - ) - }} - /> - )} - /> - - - ( - - )} - /> - ( - - )} - /> - - - - ), - ssl: ( - ( + + )} + /> + ( - )} /> ( - )} /> - - ( - - )} - /> - ( - - )} - /> - + ( + + )} + /> + ), presets: ( diff --git a/src/webui/src/layout/Dashboard/DbsTable/index.tsx b/src/webui/src/layout/Dashboard/DbsTable/index.tsx index 5d5199c027..f4e36127ee 100644 --- a/src/webui/src/layout/Dashboard/DbsTable/index.tsx +++ b/src/webui/src/layout/Dashboard/DbsTable/index.tsx @@ -43,7 +43,7 @@ export const DbsTable = () => { if (status === "error") { const err = error as Error; - return( + return ( ); }; @@ -55,14 +55,14 @@ export const DbsTable = () => { row.md_unique_name} + rows={data} + getRowId={(row) => row.md_name} rowsPerPageOptions={[]} components={{ Toolbar: () => }} disableColumnMenu initialState={{ sorting: { - sortModel: [{ field: "md_unique_name", sort: "asc" }] + sortModel: [{ field: "md_name", sort: "asc" }] } }} /> diff --git a/src/webui/src/layout/common/Grid/GridColumns.tsx b/src/webui/src/layout/common/Grid/GridColumns.tsx index a1a94ea9b5..895bad4c16 100644 --- a/src/webui/src/layout/common/Grid/GridColumns.tsx +++ b/src/webui/src/layout/common/Grid/GridColumns.tsx @@ -136,16 +136,7 @@ export const databasesColumns = ({ }: 
databasesColumnsProps): GridColDef[] => { return [ { - field: "md_id", - headerName: "ID", - width: 75, - type: "number", - align: "center", - headerAlign: "center", - hide: true - }, - { - field: "md_unique_name", + field: "md_name", headerName: "Unique name", width: 150, align: "center", @@ -160,22 +151,11 @@ export const databasesColumns = ({ hide: true }, { - field: "md_connection", + field: "md_connstr", headerName: "Connection", width: 150, align: "center", headerAlign: "center", - valueGetter(params) { - return (`${params.row.md_hostname}:${params.row.md_port}`); - }, - }, - { - field: "md_dbname", - headerName: "DB dbname", - width: 150, - align: "center", - headerAlign: "center", - hide: true }, { field: "md_include_pattern", @@ -193,13 +173,6 @@ export const databasesColumns = ({ headerAlign: "center", hide: true }, - { - field: "md_user", - headerName: "DB user", - width: 150, - align: "center", - headerAlign: "center" - }, { field: "md_is_superuser", headerName: "Super user?", @@ -211,39 +184,8 @@ export const databasesColumns = ({ hide: true }, { - field: "md_password_type", - headerName: "Password encryption", - width: 150, - align: "center", - headerAlign: "center", - hide: true - }, - { - field: "md_sslmode", - headerName: "SSL Mode", - width: 150, - align: "center", - headerAlign: "center" - }, - { - field: "md_root_ca_path", - headerName: "Root CA", - width: 150, - align: "center", - headerAlign: "center", - hide: true - }, - { - field: "md_client_cert_path", - headerName: "Client cert", - width: 150, - align: "center", - headerAlign: "center", - hide: true - }, - { - field: "md_client_key_path", - headerName: "Client key", + field: "md_encryption", + headerName: "Encryption", width: 150, align: "center", headerAlign: "center", @@ -308,14 +250,6 @@ export const databasesColumns = ({ valueGetter: (params) => JSON.stringify(params.value), hide: true }, - { - field: "md_statement_timeout_seconds", - headerName: "Statement timeout [seconds]", - type: 
"number", - width: 120, - align: "center", - headerAlign: "center" - }, { field: "md_only_if_master", headerName: "Master mode only?", @@ -360,8 +294,8 @@ export const databasesColumns = ({ setEditData={setEditData} handleModalOpen={handleModalOpen} deleteRecord={deleteRecord} - deleteParameter={params.row.md_unique_name} - warningMessage={`Remove DB "${params.row.md_unique_name}" from monitoring? This does not remove gathered metrics data from InfluxDB, see bottom of page for that`} + deleteParameter={params.row.md_name} + warningMessage={`Remove DB "${params.row.md_name}" from monitoring? This does not remove gathered metrics data from InfluxDB, see bottom of page for that`} >