diff --git a/distsql/distsql.go b/distsql/distsql.go
index 45f8665fac2a7..4034978581471 100644
--- a/distsql/distsql.go
+++ b/distsql/distsql.go
@@ -16,6 +16,7 @@ package distsql
 import (
     "github.com/juju/errors"
     "github.com/pingcap/tidb/kv"
+    "github.com/pingcap/tidb/metrics"
     "github.com/pingcap/tidb/sessionctx"
     "github.com/pingcap/tidb/statistics"
     "github.com/pingcap/tidb/types"
@@ -54,6 +55,10 @@ func Select(ctx context.Context, sctx sessionctx.Context, kvReq *kv.Request, fie
         }, nil
     }
 
+    label := metrics.LblGeneral
+    if sctx.GetSessionVars().InRestrictedSQL {
+        label = metrics.LblInternal
+    }
     return &selectResult{
         label:      "dag",
         resp:       resp,
@@ -63,21 +68,28 @@ func Select(ctx context.Context, sctx sessionctx.Context, kvReq *kv.Request, fie
         fieldTypes: fieldTypes,
         ctx:        sctx,
         feedback:   fb,
+        sqlType:    label,
     }, nil
 }
 
 // Analyze do a analyze request.
-func Analyze(ctx context.Context, client kv.Client, kvReq *kv.Request, vars *kv.Variables) (SelectResult, error) {
+func Analyze(ctx context.Context, client kv.Client, kvReq *kv.Request, vars *kv.Variables,
+    isRestrict bool) (SelectResult, error) {
     resp := client.Send(ctx, kvReq, vars)
     if resp == nil {
         return nil, errors.New("client returns nil response")
     }
+    label := metrics.LblGeneral
+    if isRestrict {
+        label = metrics.LblInternal
+    }
     result := &selectResult{
         label:    "analyze",
         resp:     resp,
         results:  make(chan resultWithErr, kvReq.Concurrency),
         closed:   make(chan struct{}),
         feedback: statistics.NewQueryFeedback(0, nil, 0, false),
+        sqlType:  label,
     }
     return result, nil
 }
@@ -94,6 +106,7 @@ func Checksum(ctx context.Context, client kv.Client, kvReq *kv.Request, vars *kv
         results:  make(chan resultWithErr, kvReq.Concurrency),
         closed:   make(chan struct{}),
         feedback: statistics.NewQueryFeedback(0, nil, 0, false),
+        sqlType:  metrics.LblGeneral,
     }
     return result, nil
 }
diff --git a/distsql/distsql_test.go b/distsql/distsql_test.go
index c933debaa52de..40e4419f7d025 100644
--- a/distsql/distsql_test.go
+++ b/distsql/distsql_test.go
@@ -64,6 +64,7 @@ func (s *testSuite) TestSelectNormal(c *C) {
     result, ok := response.(*selectResult)
     c.Assert(ok, IsTrue)
     c.Assert(result.label, Equals, "dag")
+    c.Assert(result.sqlType, Equals, "general")
     c.Assert(result.rowLen, Equals, len(colTypes))
 
     response.Fetch(context.TODO())
@@ -143,12 +144,13 @@ func (s *testSuite) TestAnalyze(c *C) {
         Build()
     c.Assert(err, IsNil)
 
-    response, err := Analyze(context.TODO(), s.sctx.GetClient(), request, kv.DefaultVars)
+    response, err := Analyze(context.TODO(), s.sctx.GetClient(), request, kv.DefaultVars, true)
     c.Assert(err, IsNil)
 
     result, ok := response.(*selectResult)
     c.Assert(ok, IsTrue)
     c.Assert(result.label, Equals, "analyze")
+    c.Assert(result.sqlType, Equals, "internal")
 
     response.Fetch(context.TODO())
diff --git a/distsql/select_result.go b/distsql/select_result.go
index ecdf76a79a3ad..77e3b2839cc18 100644
--- a/distsql/select_result.go
+++ b/distsql/select_result.go
@@ -67,6 +67,7 @@ type selectResult struct {
     feedback     *statistics.QueryFeedback
     partialCount int64 // number of partial results.
+    sqlType      string
 }
 
 func (r *selectResult) Fetch(ctx context.Context) {
@@ -78,7 +79,7 @@ func (r *selectResult) fetch(ctx context.Context) {
     defer func() {
         close(r.results)
         duration := time.Since(startTime)
-        metrics.DistSQLQueryHistgram.WithLabelValues(r.label).Observe(duration.Seconds())
+        metrics.DistSQLQueryHistgram.WithLabelValues(r.label, r.sqlType).Observe(duration.Seconds())
     }()
     for {
         resultSubset, err := r.resp.Next(ctx)
diff --git a/executor/adapter.go b/executor/adapter.go
index 7b9877a5778c2..13d74fc21206b 100644
--- a/executor/adapter.go
+++ b/executor/adapter.go
@@ -354,15 +354,19 @@ func (a *ExecStmt) logSlowQuery(txnTS uint64, succ bool) {
     if len(sessVars.StmtCtx.IndexIDs) > 0 {
         indexIDs = strings.Replace(fmt.Sprintf("index_ids:%v ", a.Ctx.GetSessionVars().StmtCtx.IndexIDs), " ", ",", -1)
     }
-    user := a.Ctx.GetSessionVars().User
+    user := sessVars.User
+    var internal string
+    if sessVars.InRestrictedSQL {
+        internal = "[INTERNAL] "
+    }
     if costTime < threshold {
         logutil.SlowQueryLogger.Debugf(
-            "[QUERY] cost_time:%v %s succ:%v con:%v user:%s txn_start_ts:%v database:%v %v%vsql:%v",
-            costTime, sessVars.StmtCtx.GetExecDetails(), succ, connID, user, txnTS, currentDB, tableIDs, indexIDs, sql)
+            "[QUERY] %vcost_time:%v %s succ:%v con:%v user:%s txn_start_ts:%v database:%v %v%vsql:%v",
+            internal, costTime, sessVars.StmtCtx.GetExecDetails(), succ, connID, user, txnTS, currentDB, tableIDs, indexIDs, sql)
     } else {
         logutil.SlowQueryLogger.Warnf(
-            "[SLOW_QUERY] cost_time:%v %s succ:%v con:%v user:%s txn_start_ts:%v database:%v %v%vsql:%v",
-            costTime, sessVars.StmtCtx.GetExecDetails(), succ, connID, user, txnTS, currentDB, tableIDs, indexIDs, sql)
+            "[SLOW_QUERY] %vcost_time:%v %s succ:%v con:%v user:%s txn_start_ts:%v database:%v %v%vsql:%v",
+            internal, costTime, sessVars.StmtCtx.GetExecDetails(), succ, connID, user, txnTS, currentDB, tableIDs, indexIDs, sql)
     }
 }
diff --git a/executor/analyze.go b/executor/analyze.go
index f7bc176636197..46b825706791a 100644
--- a/executor/analyze.go
+++ b/executor/analyze.go
@@ -176,7 +176,7 @@ func (e *AnalyzeIndexExec) open() error {
         SetConcurrency(e.concurrency).
         Build()
     ctx := context.TODO()
-    e.result, err = distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars)
+    e.result, err = distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars, e.ctx.GetSessionVars().InRestrictedSQL)
     if err != nil {
         return errors.Trace(err)
     }
@@ -295,7 +295,7 @@ func (e *AnalyzeColumnsExec) buildResp(ranges []*ranger.Range) (distsql.SelectRe
         return nil, errors.Trace(err)
     }
     ctx := context.TODO()
-    result, err := distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars)
+    result, err := distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars, e.ctx.GetSessionVars().InRestrictedSQL)
     if err != nil {
         return nil, errors.Trace(err)
     }
diff --git a/metrics/distsql.go b/metrics/distsql.go
index d93283eb81abb..71df60daa1f9e 100644
--- a/metrics/distsql.go
+++ b/metrics/distsql.go
@@ -26,7 +26,7 @@ var (
             Name:      "handle_query_duration_seconds",
             Help:      "Bucketed histogram of processing time (s) of handled queries.",
             Buckets:   prometheus.ExponentialBuckets(0.0005, 2, 13),
-        }, []string{LblType})
+        }, []string{LblType, LblSQLType})
 
     DistSQLScanKeysPartialHistogram = prometheus.NewHistogram(
         prometheus.HistogramOpts{
diff --git a/metrics/server.go b/metrics/server.go
index 9d61c27e27bd9..61ecbf0de9d61 100644
--- a/metrics/server.go
+++ b/metrics/server.go
@@ -23,14 +23,14 @@ import (
 
 // Metrics
 var (
-    QueryDurationHistogram = prometheus.NewHistogram(
+    QueryDurationHistogram = prometheus.NewHistogramVec(
         prometheus.HistogramOpts{
             Namespace: "tidb",
             Subsystem: "server",
             Name:      "handle_query_duration_seconds",
             Help:      "Bucketed histogram of processing time (s) of handled queries.",
             Buckets:   prometheus.ExponentialBuckets(0.0005, 2, 22),
-        })
+        }, []string{LblSQLType})
 
     QueryTotalCounter = prometheus.NewCounterVec(
         prometheus.CounterOpts{
diff --git a/metrics/session.go b/metrics/session.go
index 5f75ace2b2c69..4ba4022f65f3a 100644
--- a/metrics/session.go
+++ b/metrics/session.go
@@ -17,30 +17,30 @@ import "github.com/prometheus/client_golang/prometheus"
 
 // Session metrics.
 var (
-    SessionExecuteParseDuration = prometheus.NewHistogram(
+    SessionExecuteParseDuration = prometheus.NewHistogramVec(
         prometheus.HistogramOpts{
             Namespace: "tidb",
             Subsystem: "session",
             Name:      "parse_duration_seconds",
             Help:      "Bucketed histogram of processing time (s) in parse SQL.",
             Buckets:   prometheus.LinearBuckets(0.00004, 0.00001, 13),
-        })
-    SessionExecuteCompileDuration = prometheus.NewHistogram(
+        }, []string{LblSQLType})
+    SessionExecuteCompileDuration = prometheus.NewHistogramVec(
         prometheus.HistogramOpts{
             Namespace: "tidb",
             Subsystem: "session",
             Name:      "compile_duration_seconds",
             Help:      "Bucketed histogram of processing time (s) in query optimize.",
             Buckets:   prometheus.LinearBuckets(0.00004, 0.00001, 13),
-        })
-    SessionExecuteRunDuration = prometheus.NewHistogram(
+        }, []string{LblSQLType})
+    SessionExecuteRunDuration = prometheus.NewHistogramVec(
         prometheus.HistogramOpts{
             Namespace: "tidb",
             Subsystem: "session",
             Name:      "execute_duration_seconds",
             Help:      "Bucketed histogram of processing time (s) in running executor.",
             Buckets:   prometheus.ExponentialBuckets(0.0001, 2, 13),
-        })
+        }, []string{LblSQLType})
     SchemaLeaseErrorCounter = prometheus.NewCounterVec(
         prometheus.CounterOpts{
             Namespace: "tidb",
@@ -107,6 +107,9 @@ const (
     LblRollback = "rollback"
     LblType     = "type"
     LblResult   = "result"
+    LblSQLType  = "sql_type"
+    LblGeneral  = "general"
+    LblInternal = "internal"
 )
 
 func init() {
diff --git a/server/conn.go b/server/conn.go
index 1dcf3d2d1544b..21c74d0587886 100644
--- a/server/conn.go
+++ b/server/conn.go
@@ -569,7 +569,7 @@ func (cc *clientConn) addMetrics(cmd byte, startTime time.Time, err error) {
     } else {
         metrics.QueryTotalCounter.WithLabelValues(label, "OK").Inc()
     }
-    metrics.QueryDurationHistogram.Observe(time.Since(startTime).Seconds())
+    metrics.QueryDurationHistogram.WithLabelValues(metrics.LblGeneral).Observe(time.Since(startTime).Seconds())
 }
 
 // dispatch handles client request based on command which is the first byte of the data.
diff --git a/session/session.go b/session/session.go
index 11f5641553d9a..9f5e662edcfdf 100644
--- a/session/session.go
+++ b/session/session.go
@@ -542,6 +542,7 @@ func (s *session) ExecRestrictedSQL(sctx sessionctx.Context, sql string) ([]chun
     defer s.sysSessionPool().Put(tmp)
     metrics.SessionRestrictedSQLCounter.Inc()
 
+    startTime := time.Now()
     recordSets, err := se.Execute(ctx, sql)
     if err != nil {
         return nil, nil, errors.Trace(err)
@@ -566,6 +567,7 @@ func (s *session) ExecRestrictedSQL(sctx sessionctx.Context, sql string) ([]chun
             fields = rs.Fields()
         }
     }
+    metrics.QueryDurationHistogram.WithLabelValues(metrics.LblInternal).Observe(time.Since(startTime).Seconds())
     return rows, fields, nil
 }
 
@@ -732,7 +734,11 @@ func (s *session) executeStatement(ctx context.Context, connID uint64, stmtNode
         }
         return nil, errors.Trace(err)
     }
-    metrics.SessionExecuteRunDuration.Observe(time.Since(startTime).Seconds())
+    label := metrics.LblGeneral
+    if s.sessionVars.InRestrictedSQL {
+        label = metrics.LblInternal
+    }
+    metrics.SessionExecuteRunDuration.WithLabelValues(label).Observe(time.Since(startTime).Seconds())
 
     if recordSet != nil {
         recordSets = append(recordSets, recordSet)
@@ -766,7 +772,11 @@ func (s *session) execute(ctx context.Context, sql string) (recordSets []ast.Rec
         log.Warnf("con:%d parse error:\n%v\n%s", connID, err, sql)
         return nil, errors.Trace(err)
     }
-    metrics.SessionExecuteParseDuration.Observe(time.Since(startTS).Seconds())
+    label := metrics.LblGeneral
+    if s.sessionVars.InRestrictedSQL {
+        label = metrics.LblInternal
+    }
+    metrics.SessionExecuteParseDuration.WithLabelValues(label).Observe(time.Since(startTS).Seconds())
 
     compiler := executor.Compiler{Ctx: s}
     for _, stmtNode := range stmtNodes {
@@ -784,7 +794,7 @@ func (s *session) execute(ctx context.Context, sql string) (recordSets []ast.Rec
             log.Warnf("con:%d compile error:\n%v\n%s", connID, err, sql)
             return nil, errors.Trace(err)
         }
-        metrics.SessionExecuteCompileDuration.Observe(time.Since(startTS).Seconds())
+        metrics.SessionExecuteCompileDuration.WithLabelValues(label).Observe(time.Since(startTS).Seconds())
 
         // Step3: Execute the physical plan.
         if recordSets, err = s.executeStatement(ctx, connID, stmtNode, stmt, recordSets); err != nil {
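
The pattern this patch applies throughout is Prometheus's labeled histograms: each histogram gains an extra sql_type label, and every observation selects the "general" or "internal" child via WithLabelValues. Below is a minimal, self-contained sketch of that pattern; the metric name and buckets echo the diff, but the package, queryDuration, and observeQuery are illustrative names and not part of TiDB.

package main

import (
    "fmt"
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

// queryDuration mirrors the shape of the patched metrics: one histogram
// partitioned by a sql_type label ("general" vs. "internal").
var queryDuration = prometheus.NewHistogramVec(
    prometheus.HistogramOpts{
        Namespace: "tidb",
        Subsystem: "example", // hypothetical subsystem for this sketch
        Name:      "handle_query_duration_seconds",
        Help:      "Bucketed histogram of processing time (s) of handled queries.",
        Buckets:   prometheus.ExponentialBuckets(0.0005, 2, 13),
    }, []string{"sql_type"})

// observeQuery records one query's latency under the matching label, the same
// way executeStatement in the diff picks LblInternal when InRestrictedSQL is set.
func observeQuery(start time.Time, isInternal bool) {
    label := "general"
    if isInternal {
        label = "internal"
    }
    queryDuration.WithLabelValues(label).Observe(time.Since(start).Seconds())
}

func main() {
    prometheus.MustRegister(queryDuration)

    start := time.Now()
    // ... run a client-issued query ...
    observeQuery(start, false) // counted under sql_type="general"

    start = time.Now()
    // ... run an internal statement, e.g. statistics maintenance ...
    observeQuery(start, true) // counted under sql_type="internal"

    fmt.Println("observed two queries under separate sql_type labels")
}

Because both time series share one metric name and differ only in the label value, dashboards can keep a single panel and split or filter on sql_type, which is the motivation for converting the plain histograms to HistogramVec in this change.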