diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2136b81d3..addede367 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -53,6 +53,7 @@ NOTE: As semantic versioning states all 0.y.z releases can contain breaking chan
 - [#133](https://github.com/kobsio/kobs/pull/133): Improve querie performance to get logs from ClickHouse.
 - [#137](https://github.com/kobsio/kobs/pull/137): Change log view for the ClickHouse and Elasticsearch plugin.
 - [#139](https://github.com/kobsio/kobs/pull/139): Update Go and JavaScript dependencies.
+- [#140](https://github.com/kobsio/kobs/pull/140): Fill the chart for the distribution of the log lines with zero value.

 ## [v0.5.0](https://github.com/kobsio/kobs/releases/tag/v0.5.0) (2021-08-03)

diff --git a/plugins/clickhouse/pkg/instance/instance.go b/plugins/clickhouse/pkg/instance/instance.go
index 7a0ef3a2c..294f16adc 100644
--- a/plugins/clickhouse/pkg/instance/instance.go
+++ b/plugins/clickhouse/pkg/instance/instance.go
@@ -123,7 +123,8 @@ func (i *Instance) GetLogs(ctx context.Context, query, order, orderBy string, li
 	// used to render the distribution chart, which shows how many documents/rows are available within a bucket.
 	if timeEnd-timeStart > 30 {
 		interval := (timeEnd - timeStart) / 30
-		sqlQueryBuckets := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d second) AS interval_data , count(*) AS count_data FROM %s.logs WHERE timestamp >= ? AND timestamp <= ? %s GROUP BY interval_data SETTINGS skip_unavailable_shards = 1", interval, i.database, conditions)
+		// sqlQueryBuckets := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d second) AS interval_data , count(*) AS count_data FROM %s.logs WHERE timestamp >= ? AND timestamp <= ? %s GROUP BY interval_data SETTINGS skip_unavailable_shards = 1", interval, i.database, conditions)
+		sqlQueryBuckets := fmt.Sprintf(`SELECT toStartOfInterval(timestamp, INTERVAL %d second) AS interval_data , count(*) AS count_data FROM %s.logs WHERE timestamp >= ? AND timestamp <= ? %s GROUP BY interval_data ORDER BY interval_data WITH FILL FROM toStartOfInterval(FROM_UNIXTIME(%d), INTERVAL %d second) TO toStartOfInterval(FROM_UNIXTIME(%d), INTERVAL %d second) STEP %d SETTINGS skip_unavailable_shards = 1`, interval, i.database, conditions, timeStart, interval, timeEnd, interval, interval)
 		log.WithFields(logrus.Fields{"query": sqlQueryBuckets, "timeStart": timeStart, "timeEnd": timeEnd}).Tracef("sql buckets query")
 		rowsBuckets, err := i.client.QueryContext(ctx, sqlQueryBuckets, time.Unix(timeStart, 0), time.Unix(timeEnd, 0))
 		if err != nil {
diff --git a/plugins/clickhouse/src/components/panel/LogsChart.tsx b/plugins/clickhouse/src/components/panel/LogsChart.tsx
index b1af4d9d2..20daeec23 100644
--- a/plugins/clickhouse/src/components/panel/LogsChart.tsx
+++ b/plugins/clickhouse/src/components/panel/LogsChart.tsx
@@ -83,7 +83,7 @@ const LogsChart: React.FunctionComponent = ({ buckets }: ILogsC
               {tooltip.data.intervalFormatted}
-              Documents: {tooltip.data.count}
+              Documents: {tooltip.data.count || 0}
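
The core of this change is the ClickHouse ORDER BY ... WITH FILL FROM ... TO ... STEP clause: instead of returning only the intervals that contain log lines, the server now emits a row with count_data = 0 for every empty bucket between timeStart and timeEnd, so the distribution chart has no gaps. The following is a minimal, self-contained Go sketch of that pattern built around database/sql; it is not the kobs plugin code, and the names buildBucketQuery, scanBuckets, the "kobs" database, and the empty conditions string are placeholders chosen for this example.

// Sketch only: builds and consumes a zero-filled bucket query like the one in
// the diff above. All identifiers here are illustrative, not the kobs API.
package main

import (
	"context"
	"database/sql"
	"fmt"
	"time"
)

// bucket is one point of the distribution chart: the start of an interval and
// the number of log lines in it. WITH FILL makes ClickHouse return a row with
// count_data = 0 for every interval without logs, so no buckets are missing.
type bucket struct {
	Interval time.Time
	Count    int64
}

// buildBucketQuery splits [start, end] (Unix seconds) into ~30 buckets and asks
// ClickHouse to fill empty buckets via ORDER BY ... WITH FILL ... STEP.
func buildBucketQuery(database, conditions string, start, end int64) string {
	interval := (end - start) / 30
	return fmt.Sprintf(`SELECT toStartOfInterval(timestamp, INTERVAL %d second) AS interval_data, count(*) AS count_data
FROM %s.logs
WHERE timestamp >= ? AND timestamp <= ? %s
GROUP BY interval_data
ORDER BY interval_data WITH FILL
  FROM toStartOfInterval(FROM_UNIXTIME(%d), INTERVAL %d second)
  TO toStartOfInterval(FROM_UNIXTIME(%d), INTERVAL %d second)
  STEP %d
SETTINGS skip_unavailable_shards = 1`,
		interval, database, conditions, start, interval, end, interval, interval)
}

// scanBuckets runs the query and collects the (possibly zero-filled) rows.
func scanBuckets(ctx context.Context, db *sql.DB, query string, start, end int64) ([]bucket, error) {
	rows, err := db.QueryContext(ctx, query, time.Unix(start, 0), time.Unix(end, 0))
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var buckets []bucket
	for rows.Next() {
		var b bucket
		if err := rows.Scan(&b.Interval, &b.Count); err != nil {
			return nil, err
		}
		buckets = append(buckets, b)
	}
	return buckets, rows.Err()
}

func main() {
	// Print the generated SQL for the last hour; actually running it would
	// require a ClickHouse connection, which is out of scope for this sketch.
	end := time.Now().Unix()
	start := end - 3600
	fmt.Println(buildBucketQuery("kobs", "", start, end))
}

With the server now returning explicit zero rows, the frontend change in LogsChart.tsx is a small complementary guard: rendering {tooltip.data.count || 0} keeps the tooltip showing "Documents: 0" even if a bucket's count is missing or null.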