This repository has been archived by the owner on Mar 17, 2024. It is now read-only.
Adding support to control which prometheus metrics to expose #62
Merged
Changes from 1 commit (of 2 commits in this pull request)
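At a glance, the change gives PrometheusEndpointSink an explicit metric whitelist (a list of regex patterns) and per-cluster global labels, and injects the HTTPServer and CollectorRegistry instead of creating them from AppConfig, which is what the new test below relies on. A rough wiring sketch against the new apply signature follows; the port, whitelist pattern, cluster name and label values are illustrative assumptions, not values taken from this PR.

import io.prometheus.client.CollectorRegistry
import io.prometheus.client.exporter.HTTPServer

// Sketch only: the port, whitelist pattern, cluster name and labels are made up.
val registry = CollectorRegistry.defaultRegistry
val server   = new HTTPServer(8000)

val sink = PrometheusEndpointSink(
  Metrics.definitions,                               // all known metric definitions
  List("kafka_consumergroup_group_max_lag.*"),       // expose only metrics whose names match
  Map("my-cluster" -> Map("environment" -> "dev")),  // per-cluster global labels
  server,
  registry
)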
src/main/scala/com/lightbend/kafkalagexporter/PrometheusEndpointSink.scala

@@ -5,48 +5,54 @@
 package com.lightbend.kafkalagexporter
 
 import com.lightbend.kafkalagexporter.MetricsSink._
+import com.lightbend.kafkalagexporter.PrometheusEndpointSink.ClusterGlobalLabels
 import io.prometheus.client.exporter.HTTPServer
 import io.prometheus.client.hotspot.DefaultExports
 import io.prometheus.client.{CollectorRegistry, Gauge}
 
 import scala.util.Try
 
 object PrometheusEndpointSink {
-  def apply(appConfig: AppConfig, definitions: MetricDefinitions): MetricsSink =
-    Try(new PrometheusEndpointSink(appConfig, definitions))
+  type ClusterName = String
+  type GlobalLabels = Map[String, String]
+  type ClusterGlobalLabels = Map[ClusterName, GlobalLabels]
+
+  def apply(definitions: MetricDefinitions, metricWhitelist: List[String], clusterGlobalLabels: ClusterGlobalLabels,
+            server: HTTPServer, registry: CollectorRegistry): MetricsSink = {
+    Try(new PrometheusEndpointSink(definitions, metricWhitelist, clusterGlobalLabels, server, registry))
       .fold(t => throw new Exception("Could not create Prometheus Endpoint", t), sink => sink)
+  }
 }
 
-class PrometheusEndpointSink private(appConfig: AppConfig, definitions: MetricDefinitions) extends MetricsSink {
-
-  private val server = new HTTPServer(appConfig.port)
-  private val registry = CollectorRegistry.defaultRegistry
-
+class PrometheusEndpointSink private(definitions: MetricDefinitions, metricWhitelist: List[String], clusterGlobalLabels: ClusterGlobalLabels,
+                                     server: HTTPServer, registry: CollectorRegistry) extends MetricsSink {
   DefaultExports.initialize()
 
-  private val metrics: Map[String, Map[GaugeDefinition, Gauge]] = {
-    appConfig.clusters.map { cluster =>
-      val globalLabelNamesForCluster = appConfig.globalLabelsForCluster(cluster.name).keys.toSeq
-      cluster.name -> definitions.map(definition =>
-        definition -> Gauge.build()
-          .name(definition.name)
-          .help(definition.help)
-          .labelNames(globalLabelNamesForCluster ++ definition.labels: _*)
+  private val metrics: Map[PrometheusEndpointSink.ClusterName, Map[GaugeDefinition, Gauge]] = clusterGlobalLabels.map {
+    case (clusterName, globalLabels) =>
+      clusterName -> definitions.filter(d => metricWhitelist.exists(d.name.matches)).map { d =>
+        d -> Gauge.build()
+          .name(d.name)
+          .help(d.help)
+          .labelNames(globalLabels.keys.toSeq ++ d.labels: _*)
           .register(registry)
-      ).toMap
-    }.toMap
+      }.toMap
   }
 
   override def report(m: MetricValue): Unit = {
-    val metric = getMetricsForClusterName(m.definition, m.clusterName)
-    val globalLabelValuesForCluster = appConfig.globalLabelsForCluster(m.clusterName).values.toSeq
-    metric.labels(globalLabelValuesForCluster ++ m.labels: _*).set(m.value)
+    if(metricWhitelist.exists(m.definition.name.matches)) {
+      val metric = getMetricsForClusterName(m.definition, m.clusterName)
+      val globalLabelValuesForCluster = clusterGlobalLabels.getOrElse(m.clusterName, Map.empty)
+      metric.labels(globalLabelValuesForCluster.values.toSeq ++ m.labels: _*).set(m.value)
+    }
   }
 
   override def remove(m: RemoveMetric): Unit = {
-    metrics.foreach { case (_, gaugeDefinitionsForCluster) =>
-      gaugeDefinitionsForCluster.get(m.definition).foreach(_.remove(m.labels: _*))
+    if(metricWhitelist.exists(m.definition.name.matches)) {
+      metrics.foreach { case (_, gaugeDefinitionsForCluster) =>
+        val globalLabelValuesForCluster = clusterGlobalLabels.getOrElse(m.clusterName, Map.empty)
+        gaugeDefinitionsForCluster.get(m.definition).foreach(_.remove(globalLabelValuesForCluster.values.toSeq ++ m.labels: _*))
+      }
     }
   }

Review comment: Thanks for fixing this. [minor] This could be made more clear with a for expression.

Reply: I have substituted foreach with a for expression. Let me know if the change is what you had in mind.
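The follow-up commit with that change is not included in this single-commit view. A for-expression version of remove might read roughly like the sketch below; it is an assumption about the shape of that change (a drop-in replacement for the method inside the class above), not the merged code.

// Sketch only: a for expression equivalent to the nested foreach above.
override def remove(m: RemoveMetric): Unit = {
  if (metricWhitelist.exists(m.definition.name.matches)) {
    val globalLabelValuesForCluster = clusterGlobalLabels.getOrElse(m.clusterName, Map.empty)
    for {
      (_, gaugeDefinitionsForCluster) <- metrics
      gauge <- gaugeDefinitionsForCluster.get(m.definition)
    } gauge.remove(globalLabelValuesForCluster.values.toSeq ++ m.labels: _*)
  }
}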
@@ -60,7 +66,7 @@ class PrometheusEndpointSink private(appConfig: AppConfig, definitions: MetricDe
   }
 
   private def getMetricsForClusterName(gaugeDefinition: GaugeDefinition, clusterName: String): Gauge = {
-    val metricsForCluster = metrics.getOrElse(clusterName, throw new IllegalArgumentException(s"No metric for the ${clusterName} registered"))
+    val metricsForCluster = metrics.getOrElse(clusterName, throw new IllegalArgumentException(s"No metric for the $clusterName registered"))
     metricsForCluster.getOrElse(gaugeDefinition, throw new IllegalArgumentException(s"No metric with definition ${gaugeDefinition.name} registered"))
   }
 }
src/test/scala/com/lightbend/kafkalagexporter/PrometheusEndpointSinkTest.scala (new file: 106 additions, 0 deletions)
@@ -0,0 +1,106 @@
/*
 * Copyright (C) 2019 Lightbend Inc. <http://www.lightbend.com>
 */

package com.lightbend.kafkalagexporter

import java.net.ServerSocket

import io.prometheus.client.CollectorRegistry
import io.prometheus.client.exporter.HTTPServer
import org.scalatest._

import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}

class PrometheusEndpointSinkTest extends fixture.FreeSpec with Matchers {

  case class Fixture(server: HTTPServer, registry: CollectorRegistry)
  type FixtureParam = Fixture

  override def withFixture(test: OneArgTest): Outcome = {
    val httpServer =
      Try(new ServerSocket(0)) match {
        case Success(socket) =>
          val freePort = socket.getLocalPort
          socket.close()
          new HTTPServer(freePort)
        case Failure(exception) => throw exception
      }
    val registry = CollectorRegistry.defaultRegistry
    try test(Fixture(httpServer, registry))
    finally {
      registry.clear()
      httpServer.stop()
    }
  }

  "PrometheusEndpointSinkImpl should" - {

    "register only metrics which match the regex" in { fixture =>
      PrometheusEndpointSink(Metrics.definitions, List(".*max_lag.*"), Map("cluster" -> Map.empty), fixture.server, fixture.registry)
      val metricSamples = fixture.registry.metricFamilySamples().asScala.toSet

      metricSamples.map(_.name).intersect(Metrics.definitions.map(_.name).toSet) should contain theSameElementsAs
        Set("kafka_consumergroup_group_max_lag", "kafka_consumergroup_group_max_lag_seconds")
    }

    "append global labels to metric labels" in { fixture =>
      val groupLabel = Map(
        "cluster" -> Map(
          "environment" -> "dev",
          "org" -> "organization",
        )
      )
      val sink = PrometheusEndpointSink(Metrics.definitions, List(".*"), groupLabel, fixture.server, fixture.registry)
      sink.report(Metrics.GroupValueMessage(Metrics.MaxGroupTimeLagMetric, "cluster", "group", 1))

      val metricSamples = fixture.registry.metricFamilySamples().asScala.toList
      val maxGroupTimeLagMetricSamples = metricSamples.filter(_.name.equals(Metrics.MaxGroupTimeLagMetric.name)).flatMap(_.samples.asScala)

      maxGroupTimeLagMetricSamples should have length 1
      val labels = maxGroupTimeLagMetricSamples.flatMap(_.labelNames.asScala)
      val labelValues = maxGroupTimeLagMetricSamples.flatMap(_.labelValues.asScala)
      (labels zip labelValues).toMap should contain theSameElementsAs
        Map(
          "environment" -> "dev",
          "org" -> "organization",
          "cluster_name" -> "cluster",
          "group" -> "group",
        )

      sink.remove(Metrics.GroupRemoveMetricMessage(Metrics.MaxGroupTimeLagMetric, "cluster", "group"))

      val metricSamplesAfterRemoval = fixture.registry.metricFamilySamples().asScala.toList
      val maxGroupTimeLagMetricSamplesAfterRemoval = metricSamplesAfterRemoval.filter(_.name.equals(Metrics.MaxGroupTimeLagMetric.name)).flatMap(_.samples.asScala)

      maxGroupTimeLagMetricSamplesAfterRemoval should have length 0
    }

    "report only metrics which match the regex" in { fixture =>
      val sink = PrometheusEndpointSink(Metrics.definitions, List("kafka_consumergroup_group_max_lag"), Map("cluster" -> Map.empty),
        fixture.server, fixture.registry)
      sink.report(Metrics.GroupValueMessage(Metrics.MaxGroupOffsetLagMetric, "cluster", "group", 100))
      sink.report(Metrics.GroupValueMessage(Metrics.MaxGroupTimeLagMetric, "cluster", "group", 1))
      val labels = Array[String]("cluster_name", "group")
      val labelVals = Array[String]("cluster", "group")
      fixture.registry.getSampleValue("kafka_consumergroup_group_max_lag", labels, labelVals) should be (100)
      val metricSamples = fixture.registry.metricFamilySamples().asScala.toSet
      metricSamples.map(_.name) should not contain "kafka_consumergroup_group_max_lag_seconds"
    }

    "remove only metrics which match the regex" in { fixture =>
      val sink = PrometheusEndpointSink(Metrics.definitions, List("kafka_consumergroup_group_max_lag"), Map("cluster" -> Map.empty),
        fixture.server, fixture.registry)
      sink.report(Metrics.GroupValueMessage(Metrics.MaxGroupOffsetLagMetric, "cluster", "group", 100))
      sink.remove(Metrics.GroupRemoveMetricMessage(Metrics.MaxGroupOffsetLagMetric, "cluster", "group"))
      sink.remove(Metrics.GroupRemoveMetricMessage(Metrics.MaxGroupTimeLagMetric, "cluster", "group"))
      val labels = Array[String]("cluster_name", "group")
      val labelVals = Array[String]("cluster", "group")
      fixture.registry.getSampleValue("kafka_consumergroup_group_max_lag", labels, labelVals) should be (null)
    }

  }

}
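A side note on choosing whitelist patterns (an observation about the code above, not something stated in the PR): the filter uses Scala's String#matches, which does full-string Java regex matching, so a pattern must cover the entire metric name. A small self-contained illustration:

// Full-string regex matching, as used by the whitelist filter in the diff above.
object WhitelistDemo extends App {
  val whitelist = List(".*max_lag.*")
  val names     = List("kafka_consumergroup_group_max_lag", "kafka_consumergroup_group_lag")

  // Keeps only "kafka_consumergroup_group_max_lag"; a bare "max_lag" pattern would
  // keep nothing, because String#matches must match the whole name, not a substring.
  val exposed = names.filter(name => whitelist.exists(name.matches))
  println(exposed) // List(kafka_consumergroup_group_max_lag)
}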
Review comment: Nice cleanup.