From 1dd505fde140b0d64431346bfc72ee24144b8710 Mon Sep 17 00:00:00 2001
From: Jesse Szwedko
Date: Tue, 25 Jul 2023 07:59:12 -0700
Subject: [PATCH] chore(deps): Update to Rust 1.71.0 (#18075)

* chore(deps): Update to Rust 1.71.0

Signed-off-by: Jesse Szwedko

* clippy

Signed-off-by: Jesse Szwedko

* fmt

Signed-off-by: Jesse Szwedko

* clippy

Signed-off-by: Jesse Szwedko

---------

Signed-off-by: Jesse Szwedko
---
 Tiltfile                                 |  2 +-
 lib/codecs/src/decoding/format/native.rs |  2 +-
 lib/vector-common/src/finalizer.rs       |  4 ++--
 .../src/schema/visitors/human_name.rs    | 14 +++++++-------
 lib/vector-core/src/tls/incoming.rs      |  4 ++--
 lib/vector-core/src/transform/mod.rs     |  2 +-
 rust-toolchain.toml                      |  2 +-
 src/conditions/datadog_search.rs         |  4 ++--
 src/config/watcher.rs                    |  2 +-
 src/sinks/clickhouse/sink.rs             |  2 +-
 src/sinks/datadog/logs/sink.rs           |  2 +-
 src/sinks/datadog/metrics/normalizer.rs  |  2 +-
 src/sinks/greptimedb/service.rs          |  2 +-
 src/sinks/greptimedb/sink.rs             |  2 +-
 src/sinks/loki/sink.rs                   |  2 +-
 src/sinks/mod.rs                         |  2 +-
 src/sinks/splunk_hec/metrics/sink.rs     |  2 +-
 src/sinks/statsd/normalizer.rs           |  2 +-
 src/sinks/statsd/sink.rs                 |  5 +----
 src/sources/mod.rs                       |  6 +++---
 src/sources/util/grpc/mod.rs             |  2 +-
 src/sources/util/mod.rs                  |  4 ++--
 22 files changed, 34 insertions(+), 37 deletions(-)

diff --git a/Tiltfile b/Tiltfile
index 6c0c9246042b4..dcc27daed3887 100644
--- a/Tiltfile
+++ b/Tiltfile
@@ -7,7 +7,7 @@ load('ext://helm_resource', 'helm_resource', 'helm_repo')
 
 docker_build(
     ref='timberio/vector',
     context='.',
-    build_args={'RUST_VERSION': '1.70.0'},
+    build_args={'RUST_VERSION': '1.71.0'},
     dockerfile='tilt/Dockerfile'
 )
diff --git a/lib/codecs/src/decoding/format/native.rs b/lib/codecs/src/decoding/format/native.rs
index 1f47414e103f5..5258e96e6f282 100644
--- a/lib/codecs/src/decoding/format/native.rs
+++ b/lib/codecs/src/decoding/format/native.rs
@@ -19,7 +19,7 @@ pub struct NativeDeserializerConfig;
 impl NativeDeserializerConfig {
     /// Build the `NativeDeserializer` from this configuration.
     pub fn build(&self) -> NativeDeserializer {
-        NativeDeserializer::default()
+        NativeDeserializer
     }
 
     /// Return the type of event build by this deserializer.
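Most of the churn in this patch is one clippy lint that ships with Rust 1.71, `default_constructed_unit_structs`: for a unit struct such as `NativeDeserializer` above (or `EventPartitioner`, `GreptimeDBBatchSizer`, and friends further down), `Foo::default()` and the literal `Foo` are the same value, so the lint asks for the literal. A minimal sketch of the before/after shape; the type here is illustrative rather than Vector's actual definition:

```rust
/// A unit struct: no fields, exactly one possible value.
#[derive(Default)]
struct NativeDeserializer;

// Before: flagged by clippy 1.71's `default_constructed_unit_structs`,
// since calling `default()` on a unit struct is a roundabout way of
// writing the literal value.
#[allow(clippy::default_constructed_unit_structs)]
fn build_old() -> NativeDeserializer {
    NativeDeserializer::default()
}

// After: the fix applied throughout this patch, naming the unit struct directly.
fn build_new() -> NativeDeserializer {
    NativeDeserializer
}

fn main() {
    let (_a, _b) = (build_old(), build_new());
}
```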
diff --git a/lib/vector-common/src/finalizer.rs b/lib/vector-common/src/finalizer.rs
index 15bc52ab3510d..9af4237a6a341 100644
--- a/lib/vector-common/src/finalizer.rs
+++ b/lib/vector-common/src/finalizer.rs
@@ -62,7 +62,7 @@ where
             Self {
                 sender: Some(todo_tx),
                 flush: flush1,
-                _phantom: PhantomData::default(),
+                _phantom: PhantomData,
             },
             finalizer_stream(shutdown, todo_rx, S::default(), flush2).boxed(),
         )
@@ -199,7 +199,7 @@ pub struct EmptyStream<T>(PhantomData<T>);
 
 impl<T> Default for EmptyStream<T> {
     fn default() -> Self {
-        Self(PhantomData::default())
+        Self(PhantomData)
     }
 }
diff --git a/lib/vector-config/src/schema/visitors/human_name.rs b/lib/vector-config/src/schema/visitors/human_name.rs
index dc2fb7e57cca9..4b9b2330d5ed2 100644
--- a/lib/vector-config/src/schema/visitors/human_name.rs
+++ b/lib/vector-config/src/schema/visitors/human_name.rs
@@ -127,7 +127,7 @@ mod tests {
             }
         }));
 
-        let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+        let mut visitor = GenerateHumanFriendlyNameVisitor;
         visitor.visit_root_schema(&mut actual_schema);
 
         assert_schemas_eq(expected_schema, actual_schema);
@@ -150,7 +150,7 @@ mod tests {
             }
         }));
 
-        let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+        let mut visitor = GenerateHumanFriendlyNameVisitor;
         visitor.visit_root_schema(&mut actual_schema);
 
         assert_schemas_eq(expected_schema, actual_schema);
@@ -177,7 +177,7 @@ mod tests {
             }
         }));
 
-        let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+        let mut visitor = GenerateHumanFriendlyNameVisitor;
         visitor.visit_root_schema(&mut actual_schema);
 
         assert_schemas_eq(expected_schema, actual_schema);
@@ -204,7 +204,7 @@ mod tests {
             }
         }));
 
-        let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+        let mut visitor = GenerateHumanFriendlyNameVisitor;
         visitor.visit_root_schema(&mut actual_schema);
 
         assert_schemas_eq(expected_schema, actual_schema);
@@ -222,7 +222,7 @@ mod tests {
 
         let expected_schema = actual_schema.clone();
 
-        let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+        let mut visitor = GenerateHumanFriendlyNameVisitor;
         visitor.visit_root_schema(&mut actual_schema);
 
         assert_schemas_eq(expected_schema, actual_schema);
@@ -244,7 +244,7 @@ mod tests {
 
         let expected_schema = actual_schema.clone();
 
-        let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+        let mut visitor = GenerateHumanFriendlyNameVisitor;
         visitor.visit_root_schema(&mut actual_schema);
 
         assert_schemas_eq(expected_schema, actual_schema);
@@ -278,7 +278,7 @@ mod tests {
             }
         }));
 
-        let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+        let mut visitor = GenerateHumanFriendlyNameVisitor;
         visitor.visit_root_schema(&mut actual_schema);
 
         assert_schemas_eq(expected_schema, actual_schema);
diff --git a/lib/vector-core/src/tls/incoming.rs b/lib/vector-core/src/tls/incoming.rs
index 5d2fd1cdbbae2..7992f2c2b1014 100644
--- a/lib/vector-core/src/tls/incoming.rs
+++ b/lib/vector-core/src/tls/incoming.rs
@@ -263,7 +263,7 @@ impl<S: AsyncRead + AsyncWrite + Unpin> MaybeTlsIncomingStream<S> {
     where
         F: FnOnce(Pin<&mut MaybeTlsStream<S>>, &mut Context) -> Poll<io::Result<T>>,
     {
-        let mut this = self.get_mut();
+        let this = self.get_mut();
         loop {
             return match &mut this.state {
                 StreamState::Accepted(stream) => poll_fn(Pin::new(stream), cx),
@@ -307,7 +307,7 @@ impl<S: AsyncRead + AsyncWrite + Unpin> AsyncWrite for MaybeTlsIncomingStream<S> {
     }
 
     fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
-        let mut this = self.get_mut();
+        let this = self.get_mut();
         match &mut this.state {
             StreamState::Accepted(stream) => match Pin::new(stream).poll_shutdown(cx) {
                 Poll::Ready(Ok(())) => {
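The `let mut this = self.get_mut();` edits above rely on a rule that 1.71's stricter lints surface: `Pin::get_mut` hands back a plain `&mut` reference, and mutating *through* a `&mut T` never requires the binding itself to be `mut`; only reassigning the binding would. A dependency-free sketch of that rule, with illustrative types rather than Vector's:

```rust
struct Conn {
    state: u32,
}

fn poll_step(this: &mut Conn) {
    // Mutation happens through the reference; `this` itself is never reassigned.
    this.state += 1;
}

fn main() {
    let mut conn = Conn { state: 0 };

    // No `mut` on `handle`: the binding holds the same `&mut Conn` for its
    // whole life, so marking it `mut` would be flagged as unused.
    let handle = &mut conn;
    poll_step(handle);
    handle.state += 1;

    assert_eq!(conn.state, 2);
}
```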
diff --git a/lib/vector-core/src/transform/mod.rs b/lib/vector-core/src/transform/mod.rs
index af81c51aa69a1..b098837f49677 100644
--- a/lib/vector-core/src/transform/mod.rs
+++ b/lib/vector-core/src/transform/mod.rs
@@ -20,7 +20,7 @@ use crate::{
     schema, ByteSizeOf,
 };
 
-#[cfg(any(feature = "lua"))]
+#[cfg(feature = "lua")]
 pub mod runtime_transform;
 
 /// Transforms come in two variants. Functions, or tasks.
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index 008def46a7a20..67054f135b8c8 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,3 +1,3 @@
 [toolchain]
-channel = "1.70.0"
+channel = "1.71.0"
 profile = "default"
diff --git a/src/conditions/datadog_search.rs b/src/conditions/datadog_search.rs
index 27dcdfa57348a..2e2e0e88f1344 100644
--- a/src/conditions/datadog_search.rs
+++ b/src/conditions/datadog_search.rs
@@ -40,7 +40,7 @@ impl Conditional for DatadogSearchRunner {
 impl ConditionalConfig for DatadogSearchConfig {
     fn build(&self, _enrichment_tables: &enrichment::TableRegistry) -> crate::Result<Condition> {
         let node = parse(&self.source)?;
-        let matcher = as_log(build_matcher(&node, &EventFilter::default()));
+        let matcher = as_log(build_matcher(&node, &EventFilter));
 
         Ok(Condition::DatadogSearch(DatadogSearchRunner { matcher }))
     }
@@ -1039,7 +1039,7 @@ mod test {
     #[test]
     /// Parse each Datadog Search Syntax query and check that it passes/fails.
     fn event_filter() {
-        test_filter(EventFilter::default(), |ev| ev.into_log())
+        test_filter(EventFilter, |ev| ev.into_log())
     }
 
     #[test]
diff --git a/src/config/watcher.rs b/src/config/watcher.rs
index 1616a8670969d..bff6fbe7965fe 100644
--- a/src/config/watcher.rs
+++ b/src/config/watcher.rs
@@ -51,7 +51,7 @@ pub fn spawn_thread<'a>(
                 debug!(message = "Configuration file change detected.", event = ?event);
 
                 // Consume events until delay amount of time has passed since the latest event.
-                while let Ok(..) = receiver.recv_timeout(delay) {}
+                while receiver.recv_timeout(delay).is_ok() {}
 
                 debug!(message = "Consumed file change events for delay.", delay = ?delay);
 
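The watcher change is behavior-preserving: `while let Ok(..) = receiver.recv_timeout(delay) {}` with an empty body is exactly `while receiver.recv_timeout(delay).is_ok() {}`, which is the form clippy's `redundant_pattern_matching` lint prefers. The loop itself is a debounce: it keeps draining file-change events until the channel has been quiet for `delay`. A standalone sketch of the same idiom, with illustrative channel wiring:

```rust
use std::sync::mpsc;
use std::thread;
use std::time::Duration;

fn main() {
    let (tx, rx) = mpsc::channel::<()>();
    let delay = Duration::from_millis(100);

    // Simulate a burst of file-change notifications.
    thread::spawn(move || {
        for _ in 0..5 {
            tx.send(()).unwrap();
            thread::sleep(Duration::from_millis(10));
        }
    });

    // Block until the first event arrives...
    rx.recv().unwrap();
    // ...then consume follow-up events until `delay` elapses with none arriving.
    // `recv_timeout` returns `Err` on timeout (or disconnect), ending the loop.
    while rx.recv_timeout(delay).is_ok() {}

    println!("burst settled; safe to reload configuration once");
}
```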
diff --git a/src/sinks/clickhouse/sink.rs b/src/sinks/clickhouse/sink.rs
index 44013b85b2e6e..df2261f1d6738 100644
--- a/src/sinks/clickhouse/sink.rs
+++ b/src/sinks/clickhouse/sink.rs
@@ -30,7 +30,7 @@ impl ClickhouseSink {
             encoding: (
                 transformer,
                 Encoder::<Framer>::new(
-                    NewlineDelimitedEncoderConfig::default().build().into(),
+                    NewlineDelimitedEncoderConfig.build().into(),
                     JsonSerializerConfig::default().build().into(),
                 ),
             ),
diff --git a/src/sinks/datadog/logs/sink.rs b/src/sinks/datadog/logs/sink.rs
index ca1bb60e8de7a..95841fcbbff79 100644
--- a/src/sinks/datadog/logs/sink.rs
+++ b/src/sinks/datadog/logs/sink.rs
@@ -274,7 +274,7 @@ where
     async fn run_inner(self: Box<Self>, input: BoxStream<'_, Event>) -> Result<(), ()> {
         let default_api_key = Arc::clone(&self.default_api_key);
 
-        let partitioner = EventPartitioner::default();
+        let partitioner = EventPartitioner;
         let builder_limit = NonZeroUsize::new(64);
         let input = input.batched_partitioned(partitioner, self.batch_settings);
 
diff --git a/src/sinks/datadog/metrics/normalizer.rs b/src/sinks/datadog/metrics/normalizer.rs
index 37ecb29198054..d4e6430250e1c 100644
--- a/src/sinks/datadog/metrics/normalizer.rs
+++ b/src/sinks/datadog/metrics/normalizer.rs
@@ -185,7 +185,7 @@ mod tests {
 
     fn run_comparisons(inputs: Vec<Metric>, expected_outputs: Vec<Option<Metric>>) {
         let mut metric_set = MetricSet::default();
-        let mut normalizer = DatadogMetricsNormalizer::default();
+        let mut normalizer = DatadogMetricsNormalizer;
 
         for (input, expected) in inputs.into_iter().zip(expected_outputs) {
             let result = normalizer.normalize(&mut metric_set, input);
diff --git a/src/sinks/greptimedb/service.rs b/src/sinks/greptimedb/service.rs
index 2aab611af669e..6f8278b55d598 100644
--- a/src/sinks/greptimedb/service.rs
+++ b/src/sinks/greptimedb/service.rs
@@ -39,7 +39,7 @@ impl GreptimeDBRequest {
         let mut finalizers = EventFinalizers::default();
         let mut request_metadata_builder = RequestMetadataBuilder::default();
 
-        let sizer = GreptimeDBBatchSizer::default();
+        let sizer = GreptimeDBBatchSizer;
         let mut estimated_request_size = 0;
         for mut metric in metrics.into_iter() {
             finalizers.merge(metric.take_finalizers());
diff --git a/src/sinks/greptimedb/sink.rs b/src/sinks/greptimedb/sink.rs
index 2b28e50755248..fc8e75140708d 100644
--- a/src/sinks/greptimedb/sink.rs
+++ b/src/sinks/greptimedb/sink.rs
@@ -37,7 +37,7 @@ impl GreptimeDBSink {
             .normalized_with_default::<GreptimeDBMetricNormalize>()
             .batched(
                 self.batch_settings
-                    .into_item_size_config(GreptimeDBBatchSizer::default()),
+                    .into_item_size_config(GreptimeDBBatchSizer),
             )
             .map(GreptimeDBRequest::from_metrics)
             .into_driver(self.service)
diff --git a/src/sinks/loki/sink.rs b/src/sinks/loki/sink.rs
index 74e133887b6b4..fd6aa01bac23d 100644
--- a/src/sinks/loki/sink.rs
+++ b/src/sinks/loki/sink.rs
@@ -453,7 +453,7 @@ impl LokiSink {
             .map(|event| encoder.encode_event(event))
             .filter_map(|event| async { event })
             .map(|record| filter.filter_record(record))
-            .batched_partitioned(RecordPartitioner::default(), self.batch_settings)
+            .batched_partitioned(RecordPartitioner, self.batch_settings)
             .filter_map(|(partition, batch)| async {
                 if let Some(partition) = partition {
                     let mut count: usize = 0;
diff --git a/src/sinks/mod.rs b/src/sinks/mod.rs
index 960e60a3fdd9c..d349167f24f06 100644
--- a/src/sinks/mod.rs
+++ b/src/sinks/mod.rs
@@ -51,7 +51,7 @@ pub mod elasticsearch;
 pub mod file;
 #[cfg(feature = "sinks-gcp")]
 pub mod gcp;
-#[cfg(any(feature = "sinks-gcp"))]
+#[cfg(feature = "sinks-gcp")]
 pub mod gcs_common;
 #[cfg(feature = "sinks-greptimedb")]
 pub mod greptimedb;
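The `cfg` cleanups here and in the source modules that follow are also pure simplifications: `any(...)` or `all(...)` over a single predicate is just that predicate, which is what clippy began flagging in 1.71 (clippy's `non_minimal_cfg` lint, as far as I can tell). A compact illustration with hypothetical feature names:

```rust
// Hypothetical feature names, for illustration only.

// Before: a one-element `any(...)` adds wrapping without meaning.
#[cfg(any(feature = "metrics"))]
mod metrics_old {}

// After: the bare predicate says the same thing.
#[cfg(feature = "metrics")]
mod metrics {}

// `any`/`all` still earn their keep with two or more predicates:
#[cfg(all(unix, feature = "metrics"))]
mod unix_metrics {}

fn main() {}
```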
diff --git a/src/sinks/splunk_hec/metrics/sink.rs b/src/sinks/splunk_hec/metrics/sink.rs
index 629edceed45cf..f41e397a7acac 100644
--- a/src/sinks/splunk_hec/metrics/sink.rs
+++ b/src/sinks/splunk_hec/metrics/sink.rs
@@ -65,7 +65,7 @@ where
                     default_namespace,
                 ))
             })
-            .batched_partitioned(EventPartitioner::default(), self.batch_settings)
+            .batched_partitioned(EventPartitioner, self.batch_settings)
             .request_builder(builder_limit, self.request_builder)
             .filter_map(|request| async move {
                 match request {
diff --git a/src/sinks/statsd/normalizer.rs b/src/sinks/statsd/normalizer.rs
index fc8a9656a636d..e497d074dfb16 100644
--- a/src/sinks/statsd/normalizer.rs
+++ b/src/sinks/statsd/normalizer.rs
@@ -145,7 +145,7 @@ mod tests {
 
     fn run_comparisons(inputs: Vec<Metric>, expected_outputs: Vec<Option<Metric>>) {
         let mut metric_set = MetricSet::default();
-        let mut normalizer = StatsdNormalizer::default();
+        let mut normalizer = StatsdNormalizer;
 
         for (input, expected) in inputs.into_iter().zip(expected_outputs) {
             let result = normalizer.normalize(&mut metric_set, input);
diff --git a/src/sinks/statsd/sink.rs b/src/sinks/statsd/sink.rs
index b17097147bed2..a9f0bdf44a9e2 100644
--- a/src/sinks/statsd/sink.rs
+++ b/src/sinks/statsd/sink.rs
@@ -58,10 +58,7 @@ where
             // other metric types in type-specific ways i.e. incremental gauge updates use a
             // different syntax, etc.
             .normalized_with_default::<StatsdNormalizer>()
-            .batched(
-                self.batch_settings
-                    .into_item_size_config(StatsdBatchSizer::default()),
-            )
+            .batched(self.batch_settings.into_item_size_config(StatsdBatchSizer))
             // We build our requests "incrementally", which means that for a single batch of
             // metrics, we might generate N requests to represent all of the metrics in the batch.
             //
diff --git a/src/sources/mod.rs b/src/sources/mod.rs
index 83cd4cedfaa99..b39843357f96b 100644
--- a/src/sources/mod.rs
+++ b/src/sources/mod.rs
@@ -13,7 +13,7 @@ pub mod aws_kinesis_firehose;
 pub mod aws_s3;
 #[cfg(feature = "sources-aws_sqs")]
 pub mod aws_sqs;
-#[cfg(any(feature = "sources-datadog_agent"))]
+#[cfg(feature = "sources-datadog_agent")]
 pub mod datadog_agent;
 #[cfg(feature = "sources-demo_logs")]
 pub mod demo_logs;
@@ -54,11 +54,11 @@ pub mod journald;
 pub mod kafka;
 #[cfg(feature = "sources-kubernetes_logs")]
 pub mod kubernetes_logs;
-#[cfg(all(feature = "sources-logstash"))]
+#[cfg(feature = "sources-logstash")]
 pub mod logstash;
 #[cfg(feature = "sources-mongodb_metrics")]
 pub mod mongodb_metrics;
-#[cfg(all(feature = "sources-nats"))]
+#[cfg(feature = "sources-nats")]
 pub mod nats;
 #[cfg(feature = "sources-nginx_metrics")]
 pub mod nginx_metrics;
diff --git a/src/sources/util/grpc/mod.rs b/src/sources/util/grpc/mod.rs
index 2795ed0d2d0fb..68c54c5d2a0d6 100644
--- a/src/sources/util/grpc/mod.rs
+++ b/src/sources/util/grpc/mod.rs
@@ -48,7 +48,7 @@ where
         // use independent `tower` layers when the request body itself (the body type, not the actual bytes) must be
         // modified or wrapped.. so instead of a cleaner design, we're opting here to bake it all together until the
         // crates are sufficiently flexible for us to craft a better design.
-        .layer(DecompressionAndMetricsLayer::default())
+        .layer(DecompressionAndMetricsLayer)
         .add_service(service)
         .serve_with_incoming_shutdown(stream, shutdown.map(|token| tx.send(token).unwrap()))
         .in_current_span()
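The comment preserved above the `DecompressionAndMetricsLayer` change is about tower's composition model: a layer wraps a service, and a layer that changes the request *body type* (not just the bytes) forces everything outside it to agree on that type, hence decompression and metrics baked into a single layer here. A dependency-free sketch of the wrap-and-delegate shape; the trait names only mirror tower's real traits, which are async and `Poll`-based:

```rust
// Simplified, synchronous stand-ins for tower's `Service` and `Layer`.
trait Service<Req> {
    type Resp;
    fn call(&mut self, req: Req) -> Self::Resp;
}

trait Layer<S> {
    type Service;
    fn layer(&self, inner: S) -> Self::Service;
}

// The innermost service just echoes the request.
struct Echo;
impl Service<String> for Echo {
    type Resp = String;
    fn call(&mut self, req: String) -> String {
        req
    }
}

// Middleware: observe the request, then delegate to the wrapped service.
struct Logging<S>(S);
impl<S: Service<String, Resp = String>> Service<String> for Logging<S> {
    type Resp = String;
    fn call(&mut self, req: String) -> String {
        println!("request: {req}");
        self.0.call(req)
    }
}

struct LoggingLayer;
impl<S> Layer<S> for LoggingLayer {
    type Service = Logging<S>;
    fn layer(&self, inner: S) -> Logging<S> {
        Logging(inner)
    }
}

fn main() {
    let mut svc = LoggingLayer.layer(Echo);
    assert_eq!(svc.call("ping".to_string()), "ping");
}
```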
diff --git a/src/sources/util/mod.rs b/src/sources/util/mod.rs
index 51b4e4cd15ceb..e7b5706c22d2f 100644
--- a/src/sources/util/mod.rs
+++ b/src/sources/util/mod.rs
@@ -1,5 +1,5 @@
 #![allow(missing_docs)]
-#[cfg(any(feature = "sources-http_server"))]
+#[cfg(feature = "sources-http_server")]
 mod body_decoding;
 mod encoding_config;
 #[cfg(all(unix, feature = "sources-dnstap"))]
@@ -46,7 +46,7 @@ pub use unix_datagram::build_unix_datagram_source;
 pub use unix_stream::build_unix_stream_source;
 pub use wrappers::{AfterRead, AfterReadExt};
 
-#[cfg(any(feature = "sources-http_server"))]
+#[cfg(feature = "sources-http_server")]
 pub use self::body_decoding::Encoding;
 #[cfg(feature = "sources-utils-http-query")]
 pub use self::http::add_query_parameters;
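One last note, on the statsd sink comment earlier in the patch ("incremental gauge updates use a different syntax"): in the statsd line protocol an absolute gauge set and a gauge delta really are spelled differently, which is why metrics are normalized before encoding. A simplified sketch of that encoding rule, not Vector's actual encoder:

```rust
enum GaugeUpdate {
    /// Absolute value: replaces the gauge.
    Set(f64),
    /// Incremental value: adjusts the gauge; statsd requires an explicit sign.
    Delta(f64),
}

fn encode_gauge(name: &str, update: &GaugeUpdate) -> String {
    match update {
        // NOTE: real encoders must special-case negative absolute values,
        // since a leading '-' is read as a delta by statsd servers.
        GaugeUpdate::Set(v) => format!("{name}:{v}|g"),
        GaugeUpdate::Delta(d) if *d >= 0.0 => format!("{name}:+{d}|g"),
        GaugeUpdate::Delta(d) => format!("{name}:{d}|g"), // sign already present
    }
}

fn main() {
    assert_eq!(encode_gauge("fd_count", &GaugeUpdate::Set(42.0)), "fd_count:42|g");
    assert_eq!(encode_gauge("fd_count", &GaugeUpdate::Delta(5.0)), "fd_count:+5|g");
    assert_eq!(encode_gauge("fd_count", &GaugeUpdate::Delta(-3.0)), "fd_count:-3|g");
}
```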