Update dependencies #602

Merged · 7 commits · Mar 18, 2024

Changes from all commits
791 changes: 453 additions & 338 deletions Cargo.lock

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions agent/Cargo.toml
@@ -17,8 +17,8 @@ nonzero_ext = "0.3"
tracing = "0.1"

# k8s-openapi must match the version required by kube and enable a k8s version feature
k8s-openapi = { version = "0.19", default-features = false, features = ["v1_24"] }
kube = { version = "0.85", default-features = false, features = [ "derive", "runtime", "rustls-tls" ] }
k8s-openapi = { version = "0.21", default-features = false, features = ["v1_24"] }
kube = { version = "0.88", default-features = false, features = [ "derive", "runtime", "rustls-tls" ] }

semver = { version = "1.0", features = [ "serde" ] }
serde = { version = "1", features = [ "derive" ] }
12 changes: 7 additions & 5 deletions apiserver/Cargo.toml
@@ -17,18 +17,20 @@ models = { path = "../models", version = "0.1.0" }
# tracing-actix-web version must align with actix-web version
actix-web = { version = "4.4", features = ["rustls-0_21"] }
awc = "3"
actix-web-opentelemetry = { version = "0.13", features = ["metrics", "metrics-prometheus"] }
actix-web-opentelemetry = { version = "0.17", features = ["metrics", "metrics-prometheus"] }
rustls = { version = "0.21" }
rustls-pemfile = { version = "1" }
webpki = { version = "0.22.4", features = ["std"] }
opentelemetry = { version = "0.18", features = ["rt-tokio-current-thread"]}
opentelemetry-prometheus = "0.11"
opentelemetry = { version = "0.22"}
opentelemetry_sdk = {version = "0.22", features = ["rt-tokio-current-thread"]}
opentelemetry-prometheus = "0.15"
tracing = "0.1"
tracing-actix-web = "0.7"
prometheus = "0.13.0"

# k8s-openapi must match the version required by kube and enable a k8s version feature
k8s-openapi = { version = "0.19", default-features = false, features = ["v1_24"] }
kube = { version = "0.85", default-features = false, features = [ "client", "derive", "runtime", "rustls-tls" ] }
k8s-openapi = { version = "0.21", default-features = false, features = ["v1_24"] }
kube = { version = "0.88", default-features = false, features = [ "client", "derive", "runtime", "rustls-tls" ] }

async-trait = "0.1"
futures = "0.3"
16 changes: 5 additions & 11 deletions apiserver/src/api/mod.rs
@@ -26,7 +26,7 @@ use actix_web::{
web::{self, Data},
App, HttpServer,
};
use actix_web_opentelemetry::{PrometheusMetricsHandler, RequestMetricsBuilder, RequestTracing};
use actix_web_opentelemetry::{PrometheusMetricsHandler, RequestMetrics, RequestTracing};
use futures::StreamExt;
use k8s_openapi::api::core::v1::Pod;
use kube::{
@@ -38,7 +38,7 @@ use kube::{
},
ResourceExt,
};
use opentelemetry::global::meter;

use rustls::{
server::AllowAnyAnonymousOrAuthenticatedClient, Certificate, PrivateKey, RootCertStore,
ServerConfig,
@@ -111,7 +111,7 @@ pub struct APIServerSettings<T: BottlerocketShadowClient> {
pub async fn run_server<T: 'static + BottlerocketShadowClient>(
settings: APIServerSettings<T>,
k8s_client: kube::Client,
prometheus_exporter: opentelemetry_prometheus::PrometheusExporter,
prometheus_registry: prometheus::Registry,
Contributor:

Regarding the named meter, have we considered passing the SdkMeterProvider in as a parameter here, and creating the meter in the function, like:

let apiserver_meter = provider.meter("apiserver");

// Set up metrics request builder
let request_metrics = RequestMetricsBuilder::new().build(apiserver_meter);
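
For illustration, a self-contained sketch of that shape — the function and instrument names here are illustrative, not the code that was merged:

use opentelemetry::metrics::MeterProvider;
use opentelemetry_sdk::metrics::SdkMeterProvider;

// Hypothetical: the server setup receives the SDK provider and derives its
// own named meter, so instruments carry the "apiserver" scope.
fn setup_metrics(provider: &SdkMeterProvider) {
    let apiserver_meter = provider.meter("apiserver");
    let requests = apiserver_meter.u64_counter("http_requests_total").init();
    requests.add(1, &[]);
}

fn main() {
    let provider = SdkMeterProvider::builder().build();
    setup_metrics(&provider);
}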

Contributor (@ytsssun, Mar 15, 2024):

An alternative would be to directly call global::meter_provider to get the GlobalMeterProvider for the application, and create the meter from that global provider. We called global::set_meter_provider here.

See the official doc for global::meter_provider.
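
A short self-contained sketch of that alternative, assuming the global provider has been installed at startup (the instrument name is illustrative):

use opentelemetry::global;
use opentelemetry::metrics::MeterProvider;
use opentelemetry_sdk::metrics::SdkMeterProvider;

fn main() {
    // Install the SDK provider as the process-wide global, once, at startup.
    let provider = SdkMeterProvider::builder().build();
    global::set_meter_provider(provider);

    // Any module can then pull a named meter from the global provider...
    let _meter_from_provider = global::meter_provider().meter("apiserver");
    // ...or use the shortcut that does the same thing.
    let meter = global::meter("apiserver");
    let _counter = meter.u64_counter("example_total").init();
}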

) -> Result<()> {
let public_key_path = format!("{}/{}", TLS_KEY_MOUNT_PATH, PUBLIC_KEY_NAME);
let certificate_cache =
@@ -146,12 +146,6 @@ pub async fn run_server<T: 'static + BottlerocketShadowClient>(
futures::future::ready(())
});

// Build the metrics meter
let apiserver_meter = meter("apiserver");

// Set up metrics request builder
let request_metrics = RequestMetricsBuilder::new().build(apiserver_meter);

// Set up the actix server.

// Use IP for KUBERNETES_SERVICE_HOST to decide the IP family for the cluster,
@@ -240,10 +234,10 @@ pub async fn run_server<T: 'static + BottlerocketShadowClient>(
.exclude(CRD_CONVERT_ENDPOINT),
)
.wrap(RequestTracing::new())
.wrap(request_metrics.clone())
.wrap(RequestMetrics::default())
Contributor:

What is the implication here if we use the default metrics instead of metrics created from a named Meter? Do we still have a way to tell that a metric was emitted by "apiserver"?

Contributor (author):

I looked around for this info; essentially, the docs no longer reference this way of naming global metrics. It might still exist somewhere in the library, but aside from the name, the old code was a straight copy of the example in the docs: https://docs.rs/opentelemetry-prometheus/0.11.0/opentelemetry_prometheus/. I verified that the prometheus functionality still seems to work, but I don't have any context on whether there was more to this namespacing than that.

Contributor (author):

We might be able to set the global provider:

let provider = SdkMeterProvider::builder().build();
let meter = provider.meter("apiserver");
global::set_meter_provider(provider.clone());

But it's unclear whether that will actually take effect, since we are setting the meter provider while the namespace lives on the meter, not the provider. I can take a bit more time to research this and see if I can find a solid answer. We might also consider using .with_resource(Resource::new([KeyValue::new("service.name", "my_app")])) as described in https://github.com/OutThereLabs/actix-web-opentelemetry/blob/main/examples/server.rs. I'll see if I can pull the logs to confirm which approach solves this for us.

Contributor:

Replied in a new comment. I think we can pass this provider to run_server and use it to create a meter inside the run_server function, as we did in the old commit.

Contributor (author):

I found how to add the apiserver name back into the tracing, so we should be good. FWIW, the logs don't really have the namespacing I was worried about:

kubectl --kubeconfig /tmp/brupop27-us-west-2/kubeconfig.yaml logs brupop-apiserver-6bf69bf794-vnhwl -n brupop-bottlerocket-aws
....
  2024-03-16T19:13:26.354337Z  INFO models::node::drain: Pod brupop-apiserver-6bf69bf794-xqjgs deleted.
    at models/src/node/drain.rs:287
    in models::node::drain::wait_for_deletion
    in models::node::drain::drain_node with node_name: "ip-192-168-152-25.us-west-2.compute.internal"
    in models::node::client::drain_node with selector: BottlerocketShadowSelector { node_name: "ip-192-168-152-25.us-west-2.compute.internal", node_uid: "fe061dba-ee1d-4e21-aa03-5d20c8c33e16" }
    in apiserver::telemetry::HTTP request with http.method: POST, http.route: /bottlerocket-node-resource/cordon-and-drain, http.flavor: 2.0, http.scheme: https, http.host: brupop-apiserver.brupop-bottlerocket-aws.svc.cluster.local, http.client_ip: 192.168.137.65, http.user_agent: , http.target: /bottlerocket-node-resource/cordon-and-drain, otel.name: HTTP POST /bottlerocket-node-resource/cordon-and-drain, otel.kind: "server", request_id: 1879d381-bba7-48c7-ad49-6ff6cd286f1f, node_name: "ip-192-168-152-25.us-west-2.compute.internal"

  2024-03-16T19:13:26.363742Z  INFO models::node::drain: Pod nginx-test-67cb89c578-v7f49 deleted.
    at models/src/node/drain.rs:287
    in models::node::drain::wait_for_deletion
    in models::node::drain::drain_node with node_name: "ip-192-168-152-25.us-west-2.compute.internal"
    in models::node::client::drain_node with selector: BottlerocketShadowSelector { node_name: "ip-192-168-152-25.us-west-2.compute.internal", node_uid: "fe061dba-ee1d-4e21-aa03-5d20c8c33e16" }
    in apiserver::telemetry::HTTP request with http.method: POST, http.route: /bottlerocket-node-resource/cordon-and-drain, http.flavor: 2.0, http.scheme: https, http.host: brupop-apiserver.brupop-bottlerocket-aws.svc.cluster.local, http.client_ip: 192.168.137.65, http.user_agent: , http.target: /bottlerocket-node-resource/cordon-and-drain, otel.name: HTTP POST /bottlerocket-node-resource/cordon-and-drain, otel.kind: "server", request_id: 1879d381-bba7-48c7-ad49-6ff6cd286f1f, node_name: "ip-192-168-152-25.us-west-2.compute.internal"

  2024-03-16T19:13:26.364038Z  INFO models::node::drain: Pod brupop-controller-deployment-b5f58c996-twvq6 deleted.
    at models/src/node/drain.rs:287
    in models::node::drain::wait_for_deletion
    in models::node::drain::drain_node with node_name: "ip-192-168-152-25.us-west-2.compute.internal"
    in models::node::client::drain_node with selector: BottlerocketShadowSelector { node_name: "ip-192-168-152-25.us-west-2.compute.internal", node_uid: "fe061dba-ee1d-4e21-aa03-5d20c8c33e16" }
    in apiserver::telemetry::HTTP request with http.method: POST, http.route: /bottlerocket-node-resource/cordon-and-drain, http.flavor: 2.0, http.scheme: https, http.host: brupop-apiserver.brupop-bottlerocket-aws.svc.cluster.local, http.client_ip: 192.168.137.65, http.user_agent: , http.target: /bottlerocket-node-resource/cordon-and-drain, otel.name: HTTP POST /bottlerocket-node-resource/cordon-and-drain, otel.kind: "server", request_id: 1879d381-bba7-48c7-ad49-6ff6cd286f1f, node_name: "ip-192-168-152-25.us-west-2.compute.internal"

  2024-03-16T19:13:26.404277Z  INFO models::node::drain: Pod cert-manager-cainjector-8699cf859b-hnswk deleted.
    at models/src/node/drain.rs:287
    in models::node::drain::wait_for_deletion
    in models::node::drain::drain_node with node_name: "ip-192-168-152-25.us-west-2.compute.internal"
    in models::node::client::drain_node with selector: BottlerocketShadowSelector { node_name: "ip-192-168-152-25.us-west-2.compute.internal", node_uid: "fe061dba-ee1d-4e21-aa03-5d20c8c33e16" }
    in apiserver::telemetry::HTTP request with http.method: POST, http.route: /bottlerocket-node-resource/cordon-and-drain, http.flavor: 2.0, http.scheme: https, http.host: brupop-apiserver.brupop-bottlerocket-aws.svc.cluster.local, http.client_ip: 192.168.137.65, http.user_agent: , http.target: /bottlerocket-node-resource/cordon-and-drain, otel.name: HTTP POST /bottlerocket-node-resource/cordon-and-drain, otel.kind: "server", request_id: 1879d381-bba7-48c7-ad49-6ff6cd286f1f, node_name: "ip-192-168-152-25.us-west-2.compute.internal"

.route(
"/metrics",
web::get().to(PrometheusMetricsHandler::new(prometheus_exporter.clone())),
web::get().to(PrometheusMetricsHandler::new(prometheus_registry.clone())),
)
.wrap(TracingLogger::<telemetry::BrupopApiserverRootSpanBuilder>::new())
.app_data(Data::new(settings.clone()))
35 changes: 22 additions & 13 deletions apiserver/src/main.rs
@@ -2,11 +2,12 @@ use apiserver::api::{self, APIServerSettings};
use apiserver_error::{StartServerSnafu, StartTelemetrySnafu};
use models::node::K8SBottlerocketShadowClient;
use models::telemetry;
use opentelemetry::global;
use tracing::{event, Level};

use opentelemetry::sdk::export::metrics::aggregation;
use opentelemetry::sdk::metrics::{controllers, processors, selectors};

use opentelemetry::KeyValue;
use opentelemetry_sdk::metrics::SdkMeterProvider;
use opentelemetry_sdk::Resource;
use snafu::ResultExt;

use std::convert::TryFrom;
@@ -34,16 +35,19 @@ async fn main() {

async fn run_server() -> Result<(), apiserver_error::Error> {
telemetry::init_telemetry_from_env().context(StartTelemetrySnafu)?;
let controller = controllers::basic(
processors::factory(
selectors::simple::histogram([1.0, 2.0, 5.0, 10.0, 20.0, 50.0]),
aggregation::cumulative_temporality_selector(),
)
.with_memory(true),
)
.build();

let prometheus_exporter = opentelemetry_prometheus::exporter(controller).init();
let prometheus_registry = prometheus::Registry::new();

let prometheus_exporter = opentelemetry_prometheus::exporter()
.with_registry(prometheus_registry.clone())
.build()
.context(apiserver_error::PrometheusRegistrySnafu)?;

let prometheus_provider = SdkMeterProvider::builder()
.with_reader(prometheus_exporter)
.with_resource(Resource::new([KeyValue::new("service.name", "apiserver")]))
.build();
global::set_meter_provider(prometheus_provider);

let incluster_config =
kube::Config::incluster_dns().context(apiserver_error::K8sClientConfigSnafu)?;
@@ -68,7 +72,7 @@ async fn run_server() -> Result<(), apiserver_error::Error> {
namespace,
};

api::run_server(settings, k8s_client, prometheus_exporter)
api::run_server(settings, k8s_client, prometheus_registry)
.await
.context(StartServerSnafu)
}
@@ -109,5 +113,10 @@ pub mod apiserver_error {
StartServer {
source: apiserver::api::error::Error,
},

#[snafu(display("Error creating prometheus registry: '{}'", source))]
PrometheusRegistry {
source: opentelemetry::metrics::MetricsError,
},
}
}
9 changes: 5 additions & 4 deletions controller/Cargo.toml
@@ -18,11 +18,12 @@ semver = "1.0"
serde = "1"
serde_plain = "1"
# k8s-openapi must match the version required by kube and enable a k8s version feature
k8s-openapi = { version = "0.19", default-features = false, features = ["v1_24"] }
kube = { version = "0.85", default-features = false, features = [ "derive", "runtime", "rustls-tls" ] }
k8s-openapi = { version = "0.21", default-features = false, features = ["v1_24"] }
kube = { version = "0.88", default-features = false, features = [ "derive", "runtime", "rustls-tls" ] }
models = { path = "../models", version = "0.1.0" }
opentelemetry = { version = "0.18", features = ["rt-tokio-current-thread"] }
opentelemetry-prometheus = "0.11"
opentelemetry = { version = "0.22"}
opentelemetry_sdk = { version = "0.22", features = ["rt-tokio-current-thread"]}
opentelemetry-prometheus = "0.15"
prometheus = "0.13.0"

snafu = "0.7"
6 changes: 4 additions & 2 deletions controller/src/controller.rs
@@ -15,7 +15,8 @@ use kube::api::DeleteParams;
use kube::runtime::reflector::Store;
use kube::Api;
use kube::ResourceExt;
use opentelemetry::global;
use opentelemetry::metrics::MeterProvider;
use opentelemetry_sdk::metrics::SdkMeterProvider;
use snafu::ResultExt;
use std::collections::BTreeMap;
use std::env;
@@ -52,10 +53,11 @@ impl<T: BottlerocketShadowClient> BrupopController<T> {
brs_reader: Store<BottlerocketShadow>,
node_reader: Store<Node>,
namespace: &str,
provider: &SdkMeterProvider,
) -> Self {
// Creates the brupop-controller meter from the provided SdkMeterProvider,
// which is configured with the Prometheus exporter in main()
let meter = global::meter("brupop-controller");
let meter = provider.meter("brupop-controller");
let metrics = BrupopControllerMetrics::new(meter);
BrupopController {
k8s_client,
36 changes: 22 additions & 14 deletions controller/src/main.rs
@@ -21,8 +21,7 @@ use kube::{
ResourceExt,
};

use opentelemetry::sdk::export::metrics::aggregation;
use opentelemetry::sdk::metrics::{controllers, processors, selectors};
use opentelemetry_sdk::metrics::SdkMeterProvider;
use snafu::ResultExt;
use tracing::{event, Level};

@@ -50,18 +49,16 @@ async fn main() -> Result<()> {

let node_client = K8SBottlerocketShadowClient::new(k8s_client.clone(), &namespace);

let controller = controllers::basic(
processors::factory(
selectors::simple::histogram([1.0, 2.0, 5.0, 10.0, 20.0, 50.0]),
aggregation::cumulative_temporality_selector(),
)
.with_memory(false),
)
.build();
let registry = prometheus::Registry::new();

// Exporter has to be initialized before BrupopController
// in order to setup global meter provider properly
let exporter = opentelemetry_prometheus::exporter(controller).init();
let exporter = opentelemetry_prometheus::exporter()
.with_registry(registry.clone())
.build()
.context(controller_error::PrometheusRegistrySnafu)?;

let provider = SdkMeterProvider::builder().with_reader(exporter).build();

// Setup and run a reflector, ensuring that `BottlerocketShadow` updates are reflected to the controller.
let brs_reflector = reflector::reflector(
@@ -96,8 +93,14 @@
});

// Setup and run the controller.
let controller =
BrupopController::new(k8s_client, node_client, brs_reader, node_reader, &namespace);
let controller = BrupopController::new(
k8s_client,
node_client,
brs_reader,
node_reader,
&namespace,
&provider,
);
let controller_runner = controller.run();

let k8s_service_addr = env::var("KUBERNETES_SERVICE_HOST")
@@ -113,7 +116,7 @@
// Setup Http server to vend prometheus metrics
let prometheus_server = HttpServer::new(move || {
App::new()
.app_data(Data::new(exporter.clone()))
.app_data(Data::new(registry.clone()))
.service(vending_metrics)
})
.bind(format!("{}:{}", bindaddress, CONTROLLER_INTERNAL_PORT))
@@ -178,5 +181,10 @@ pub mod controller_error {
TelemetryInit {
source: telemetry::TelemetryConfigError,
},

#[snafu(display("Error creating prometheus registry: '{}'", source))]
PrometheusRegistry {
source: opentelemetry::metrics::MetricsError,
},
}
}
8 changes: 4 additions & 4 deletions controller/src/metrics.rs
@@ -81,19 +81,19 @@ impl BrupopControllerMetrics {
.with_description("Brupop host's state")
.init();

let _ = meter.register_callback(move |cx| {
let _ = meter.register_callback(&[brupop_hosts_version_observer.as_any()], move |cx| {
let data = hosts_data_clone_for_version.lock().unwrap();
for (host_version, count) in &data.hosts_version_count {
let labels = vec![HOST_VERSION_KEY.string(host_version.to_string())];
brupop_hosts_version_observer.observe(cx, *count, &labels);
cx.observe_u64(&brupop_hosts_version_observer, *count, &labels);
}
});

let _ = meter.register_callback(move |cx| {
let _ = meter.register_callback(&[brupop_hosts_state_observer.as_any()], move |cx| {
let data = hosts_data_clone_for_state.lock().unwrap();
for (host_state, count) in &data.hosts_state_count {
let labels = vec![HOST_STATE_KEY.string(host_state.to_string())];
brupop_hosts_state_observer.observe(cx, *count, &labels);
cx.observe_u64(&brupop_hosts_state_observer, *count, &labels);
}
});

18 changes: 9 additions & 9 deletions controller/src/scheduler.rs
@@ -253,7 +253,7 @@ pub(crate) mod test {
let test_cases = vec![
(
Schedule::from_str("* * * * * * *".as_ref()).unwrap(),
DateTime::<Utc>::from_utc(
DateTime::<Utc>::from_naive_utc_and_offset(
NaiveDate::from_ymd_opt(2099, 1, 1)
.unwrap()
.and_hms_opt(2, 0, 0)
@@ -264,7 +264,7 @@
),
(
Schedule::from_str("10 10 10 * * * *".as_ref()).unwrap(),
DateTime::<Utc>::from_utc(
DateTime::<Utc>::from_naive_utc_and_offset(
NaiveDate::from_ymd_opt(2099, 1, 1)
.unwrap()
.and_hms_opt(2, 0, 0)
@@ -275,7 +275,7 @@
),
(
Schedule::from_str("10 10 10 * * Mon *".as_ref()).unwrap(),
DateTime::<Utc>::from_utc(
DateTime::<Utc>::from_naive_utc_and_offset(
NaiveDate::from_ymd_opt(2099, 1, 1)
.unwrap()
.and_hms_opt(2, 0, 0)
@@ -297,7 +297,7 @@
fn test_duration_to_next() {
let test_cases = vec![
(
DateTime::<Utc>::from_utc(
DateTime::<Utc>::from_naive_utc_and_offset(
NaiveDate::from_ymd_opt(2099, 12, 1)
.unwrap()
.and_hms_opt(2, 0, 0)
@@ -308,7 +308,7 @@
chrono::Duration::hours(2),
),
(
DateTime::<Utc>::from_utc(
DateTime::<Utc>::from_naive_utc_and_offset(
NaiveDate::from_ymd_opt(2099, 12, 1)
.unwrap()
.and_hms_opt(0, 0, 0)
@@ -319,7 +319,7 @@
chrono::Duration::days(30),
),
(
DateTime::<Utc>::from_utc(
DateTime::<Utc>::from_naive_utc_and_offset(
NaiveDate::from_ymd_opt(2099, 12, 1)
.unwrap()
.and_hms_opt(0, 0, 0)
@@ -341,7 +341,7 @@
fn test_should_discontinue_updates_impl() {
let test_cases = vec![
(
DateTime::<Utc>::from_utc(
DateTime::<Utc>::from_naive_utc_and_offset(
NaiveDate::from_ymd_opt(2099, 12, 1)
.unwrap()
.and_hms_opt(2, 0, 0)
@@ -352,7 +352,7 @@
false,
),
(
DateTime::<Utc>::from_utc(
DateTime::<Utc>::from_naive_utc_and_offset(
NaiveDate::from_ymd_opt(2099, 12, 1)
.unwrap()
.and_hms_opt(0, 0, 0)
@@ -363,7 +363,7 @@
false,
),
(
DateTime::<Utc>::from_utc(
DateTime::<Utc>::from_naive_utc_and_offset(
NaiveDate::from_ymd_opt(2099, 12, 1)
.unwrap()
.and_hms_opt(0, 0, 0)
6 changes: 2 additions & 4 deletions controller/src/telemetry.rs
@@ -3,11 +3,9 @@ use opentelemetry::{global, metrics::MetricsError};
use prometheus::{Encoder, TextEncoder};

#[get("/metrics")]
pub async fn vending_metrics(
exporter: Data<opentelemetry_prometheus::PrometheusExporter>,
) -> HttpResponse {
pub async fn vending_metrics(registry: Data<prometheus::Registry>) -> HttpResponse {
let encoder = TextEncoder::new();
let metric_families = exporter.registry().gather();
let metric_families = registry.gather();
let mut buf = Vec::new();
if let Err(err) = encoder.encode(&metric_families[..], &mut buf) {
global::handle_error(MetricsError::Other(err.to_string()));
2 changes: 1 addition & 1 deletion deploy/Cargo.toml
@@ -8,7 +8,7 @@ license = "Apache-2.0 OR MIT"
[build-dependencies]
models = { path = "../models", version = "0.1.0" }
dotenv = "0.15"
kube = { version = "0.85", default-features = false, features = [ "derive", "runtime" ] }
kube = { version = "0.88", default-features = false, features = [ "derive", "runtime" ] }
serde_yaml = "0.9"

[dev-dependencies]