Commit 1dd505f

chore(deps): Update to Rust 1.71.0 (vectordotdev#18075)
* chore(deps): Update to Rust 1.71.0
* clippy
* fmt
* clippy

Signed-off-by: Jesse Szwedko <[email protected]>
1 parent 3968325 commit 1dd505f

File tree: 22 files changed (+34, -37 lines)

Tiltfile

+1-1
@@ -7,7 +7,7 @@ load('ext://helm_resource', 'helm_resource', 'helm_repo')
 docker_build(
     ref='timberio/vector',
     context='.',
-    build_args={'RUST_VERSION': '1.70.0'},
+    build_args={'RUST_VERSION': '1.71.0'},
     dockerfile='tilt/Dockerfile'
 )

lib/codecs/src/decoding/format/native.rs

+1-1
@@ -19,7 +19,7 @@ pub struct NativeDeserializerConfig;
 impl NativeDeserializerConfig {
     /// Build the `NativeDeserializer` from this configuration.
     pub fn build(&self) -> NativeDeserializer {
-        NativeDeserializer::default()
+        NativeDeserializer
     }
 
     /// Return the type of event build by this deserializer.
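Most of the `::default()` removals in this commit are instances of the pattern above: the type is a unit struct, so naming it already constructs it. A minimal sketch of the rewrite, assuming the trigger is clippy's `default_constructed_unit_structs` lint (new around this toolchain release):

```rust
// Unit struct: no fields, so the bare name is a complete constructor.
#[derive(Default)]
struct NativeDeserializer;

fn build() -> NativeDeserializer {
    // Before: NativeDeserializer::default()
    // After: just the name; `default()` could only ever return this value.
    NativeDeserializer
}

fn main() {
    let _deserializer = build();
}
```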

lib/vector-common/src/finalizer.rs

+2-2
@@ -62,7 +62,7 @@ where
             Self {
                 sender: Some(todo_tx),
                 flush: flush1,
-                _phantom: PhantomData::default(),
+                _phantom: PhantomData,
             },
             finalizer_stream(shutdown, todo_rx, S::default(), flush2).boxed(),
         )
@@ -199,7 +199,7 @@ pub struct EmptyStream<T>(PhantomData<T>);
 
 impl<T> Default for EmptyStream<T> {
     fn default() -> Self {
-        Self(PhantomData::default())
+        Self(PhantomData)
     }
 }
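`PhantomData` gets the same treatment: it is a unit-like marker, so `PhantomData::default()` adds nothing over the bare name, and the type parameter is inferred either way. A small self-contained sketch mirroring `EmptyStream`, not the crate's actual code:

```rust
use std::marker::PhantomData;

// The marker carries only type information; `T` comes from inference.
struct EmptyStream<T>(PhantomData<T>);

impl<T> Default for EmptyStream<T> {
    fn default() -> Self {
        // Before: Self(PhantomData::default())
        Self(PhantomData)
    }
}

fn main() {
    let _stream: EmptyStream<u64> = EmptyStream::default();
}
```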

lib/vector-config/src/schema/visitors/human_name.rs

+7-7
@@ -127,7 +127,7 @@ mod tests {
             }
         }));
 
-        let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+        let mut visitor = GenerateHumanFriendlyNameVisitor;
         visitor.visit_root_schema(&mut actual_schema);
 
         assert_schemas_eq(expected_schema, actual_schema);
@@ -150,7 +150,7 @@ mod tests {
             }
         }));
 
-        let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+        let mut visitor = GenerateHumanFriendlyNameVisitor;
         visitor.visit_root_schema(&mut actual_schema);
 
         assert_schemas_eq(expected_schema, actual_schema);
@@ -177,7 +177,7 @@ mod tests {
             }
         }));
 
-        let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+        let mut visitor = GenerateHumanFriendlyNameVisitor;
         visitor.visit_root_schema(&mut actual_schema);
 
         assert_schemas_eq(expected_schema, actual_schema);
@@ -204,7 +204,7 @@ mod tests {
             }
         }));
 
-        let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+        let mut visitor = GenerateHumanFriendlyNameVisitor;
         visitor.visit_root_schema(&mut actual_schema);
 
        assert_schemas_eq(expected_schema, actual_schema);
@@ -222,7 +222,7 @@ mod tests {
 
         let expected_schema = actual_schema.clone();
 
-        let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+        let mut visitor = GenerateHumanFriendlyNameVisitor;
         visitor.visit_root_schema(&mut actual_schema);
 
         assert_schemas_eq(expected_schema, actual_schema);
@@ -244,7 +244,7 @@ mod tests {
 
         let expected_schema = actual_schema.clone();
 
-        let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+        let mut visitor = GenerateHumanFriendlyNameVisitor;
         visitor.visit_root_schema(&mut actual_schema);
 
         assert_schemas_eq(expected_schema, actual_schema);
@@ -278,7 +278,7 @@ mod tests {
             }
         }));
 
-        let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+        let mut visitor = GenerateHumanFriendlyNameVisitor;
         visitor.visit_root_schema(&mut actual_schema);
 
         assert_schemas_eq(expected_schema, actual_schema);

lib/vector-core/src/tls/incoming.rs

+2-2
@@ -263,7 +263,7 @@ impl MaybeTlsIncomingStream<TcpStream> {
     where
         F: FnOnce(Pin<&mut MaybeTlsStream<TcpStream>>, &mut Context) -> Poll<io::Result<T>>,
     {
-        let mut this = self.get_mut();
+        let this = self.get_mut();
         loop {
             return match &mut this.state {
                 StreamState::Accepted(stream) => poll_fn(Pin::new(stream), cx),
@@ -307,7 +307,7 @@ impl AsyncWrite for MaybeTlsIncomingStream<TcpStream> {
     }
 
     fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
-        let mut this = self.get_mut();
+        let this = self.get_mut();
         match &mut this.state {
             StreamState::Accepted(stream) => match Pin::new(stream).poll_shutdown(cx) {
                 Poll::Ready(Ok(())) => {
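Both hunks drop a `mut` that the binding never needed: the value is a `&mut` reference, and mutation happens through the reference type rather than by reassigning the binding, which the updated toolchain warns about (presumably via `unused_mut`; the exact lint is an assumption here). A standalone sketch:

```rust
struct Connection {
    open: bool,
}

fn close(conn: &mut Connection) {
    // Previously `let mut this = conn;` -- the `mut` is unused because
    // the write below goes through the `&mut Connection` reference.
    let this = conn;
    this.open = false;
}

fn main() {
    let mut conn = Connection { open: true };
    close(&mut conn);
    assert!(!conn.open);
}
```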

lib/vector-core/src/transform/mod.rs

+1-1
@@ -20,7 +20,7 @@ use crate::{
     schema, ByteSizeOf,
 };
 
-#[cfg(any(feature = "lua"))]
+#[cfg(feature = "lua")]
 pub mod runtime_transform;
 
 /// Transforms come in two variants. Functions, or tasks.
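`any()` with a single predicate means exactly the same as the predicate alone, which is what this hunk and the later `#[cfg]` cleanups in src/sinks/mod.rs, src/sources/mod.rs (including single-predicate `all()`), and src/sources/util/mod.rs remove. A sketch, assuming clippy's `non_minimal_cfg` lint is what flagged these:

```rust
// Before: #[cfg(any(feature = "lua"))] -- a one-element `any()`.
// After: the bare predicate, with identical meaning.
#[cfg(feature = "lua")]
pub mod runtime_transform {
    // Compiled only when the "lua" feature is enabled (the feature
    // name is taken from the diff above; the empty body is a sketch).
}

fn main() {}
```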

rust-toolchain.toml

+1-1
@@ -1,3 +1,3 @@
 [toolchain]
-channel = "1.70.0"
+channel = "1.71.0"
 profile = "default"

src/conditions/datadog_search.rs

+2-2
@@ -40,7 +40,7 @@ impl Conditional for DatadogSearchRunner {
 impl ConditionalConfig for DatadogSearchConfig {
     fn build(&self, _enrichment_tables: &enrichment::TableRegistry) -> crate::Result<Condition> {
         let node = parse(&self.source)?;
-        let matcher = as_log(build_matcher(&node, &EventFilter::default()));
+        let matcher = as_log(build_matcher(&node, &EventFilter));
 
         Ok(Condition::DatadogSearch(DatadogSearchRunner { matcher }))
     }
@@ -1039,7 +1039,7 @@ mod test {
     #[test]
     /// Parse each Datadog Search Syntax query and check that it passes/fails.
     fn event_filter() {
-        test_filter(EventFilter::default(), |ev| ev.into_log())
+        test_filter(EventFilter, |ev| ev.into_log())
     }
 
     #[test]

src/config/watcher.rs

+1-1
@@ -51,7 +51,7 @@ pub fn spawn_thread<'a>(
         debug!(message = "Configuration file change detected.", event = ?event);
 
         // Consume events until delay amount of time has passed since the latest event.
-        while let Ok(..) = receiver.recv_timeout(delay) {}
+        while receiver.recv_timeout(delay).is_ok() {}
 
         debug!(message = "Consumed file change events for delay.", delay = ?delay);
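Matching `Ok(..)` only to discard the value is the case clippy's `redundant_pattern_matching` rewrites to `.is_ok()`. A self-contained sketch of the drain loop, with a plain `mpsc` channel standing in for the watcher's receiver:

```rust
use std::sync::mpsc;
use std::time::Duration;

// Consume pending events until `delay` passes with no new event.
fn drain(receiver: &mpsc::Receiver<()>, delay: Duration) {
    // Before: while let Ok(..) = receiver.recv_timeout(delay) {}
    while receiver.recv_timeout(delay).is_ok() {}
}

fn main() {
    let (sender, receiver) = mpsc::channel();
    sender.send(()).unwrap();
    drain(&receiver, Duration::from_millis(10));
}
```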

src/sinks/clickhouse/sink.rs

+1-1
@@ -30,7 +30,7 @@ impl ClickhouseSink {
             encoding: (
                 transformer,
                 Encoder::<Framer>::new(
-                    NewlineDelimitedEncoderConfig::default().build().into(),
+                    NewlineDelimitedEncoderConfig.build().into(),
                     JsonSerializerConfig::default().build().into(),
                 ),
             ),
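This hunk shows the lint's boundary: `NewlineDelimitedEncoderConfig` is evidently a unit struct, so the bare name suffices, while `JsonSerializerConfig::default()` stays because that config has fields. A contrast sketch (the `pretty` field is hypothetical, for illustration only):

```rust
// Unit struct: `::default()` is flagged; use the name directly.
#[derive(Default)]
struct NewlineDelimitedEncoderConfig;

// Fielded struct: `::default()` is meaningful and is kept.
#[derive(Default)]
struct JsonSerializerConfig {
    pretty: bool, // hypothetical field
}

fn main() {
    let _framing = NewlineDelimitedEncoderConfig; // was ::default()
    let serializer = JsonSerializerConfig::default(); // unchanged
    assert!(!serializer.pretty);
}
```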

src/sinks/datadog/logs/sink.rs

+1-1
@@ -274,7 +274,7 @@ where
     async fn run_inner(self: Box<Self>, input: BoxStream<'_, Event>) -> Result<(), ()> {
         let default_api_key = Arc::clone(&self.default_api_key);
 
-        let partitioner = EventPartitioner::default();
+        let partitioner = EventPartitioner;
 
         let builder_limit = NonZeroUsize::new(64);
         let input = input.batched_partitioned(partitioner, self.batch_settings);

src/sinks/datadog/metrics/normalizer.rs

+1-1
@@ -185,7 +185,7 @@ mod tests {
 
     fn run_comparisons(inputs: Vec<Metric>, expected_outputs: Vec<Option<Metric>>) {
         let mut metric_set = MetricSet::default();
-        let mut normalizer = DatadogMetricsNormalizer::default();
+        let mut normalizer = DatadogMetricsNormalizer;
 
         for (input, expected) in inputs.into_iter().zip(expected_outputs) {
             let result = normalizer.normalize(&mut metric_set, input);

src/sinks/greptimedb/service.rs

+1-1
@@ -39,7 +39,7 @@ impl GreptimeDBRequest {
         let mut finalizers = EventFinalizers::default();
         let mut request_metadata_builder = RequestMetadataBuilder::default();
 
-        let sizer = GreptimeDBBatchSizer::default();
+        let sizer = GreptimeDBBatchSizer;
         let mut estimated_request_size = 0;
         for mut metric in metrics.into_iter() {
             finalizers.merge(metric.take_finalizers());

src/sinks/greptimedb/sink.rs

+1-1
@@ -37,7 +37,7 @@ impl GreptimeDBSink {
             .normalized_with_default::<GreptimeDBMetricNormalize>()
             .batched(
                 self.batch_settings
-                    .into_item_size_config(GreptimeDBBatchSizer::default()),
+                    .into_item_size_config(GreptimeDBBatchSizer),
             )
             .map(GreptimeDBRequest::from_metrics)
             .into_driver(self.service)

src/sinks/loki/sink.rs

+1-1
@@ -453,7 +453,7 @@ impl LokiSink {
             .map(|event| encoder.encode_event(event))
             .filter_map(|event| async { event })
             .map(|record| filter.filter_record(record))
-            .batched_partitioned(RecordPartitioner::default(), self.batch_settings)
+            .batched_partitioned(RecordPartitioner, self.batch_settings)
             .filter_map(|(partition, batch)| async {
                 if let Some(partition) = partition {
                     let mut count: usize = 0;

src/sinks/mod.rs

+1-1
@@ -51,7 +51,7 @@ pub mod elasticsearch;
 pub mod file;
 #[cfg(feature = "sinks-gcp")]
 pub mod gcp;
-#[cfg(any(feature = "sinks-gcp"))]
+#[cfg(feature = "sinks-gcp")]
 pub mod gcs_common;
 #[cfg(feature = "sinks-greptimedb")]
 pub mod greptimedb;

src/sinks/splunk_hec/metrics/sink.rs

+1-1
@@ -65,7 +65,7 @@ where
                 default_namespace,
             ))
         })
-        .batched_partitioned(EventPartitioner::default(), self.batch_settings)
+        .batched_partitioned(EventPartitioner, self.batch_settings)
         .request_builder(builder_limit, self.request_builder)
         .filter_map(|request| async move {
             match request {

src/sinks/statsd/normalizer.rs

+1-1
@@ -145,7 +145,7 @@ mod tests {
 
     fn run_comparisons(inputs: Vec<Metric>, expected_outputs: Vec<Option<Metric>>) {
         let mut metric_set = MetricSet::default();
-        let mut normalizer = StatsdNormalizer::default();
+        let mut normalizer = StatsdNormalizer;
 
         for (input, expected) in inputs.into_iter().zip(expected_outputs) {
             let result = normalizer.normalize(&mut metric_set, input);

src/sinks/statsd/sink.rs

+1-4
@@ -58,10 +58,7 @@ where
             // other metric types in type-specific ways i.e. incremental gauge updates use a
             // different syntax, etc.
             .normalized_with_default::<StatsdNormalizer>()
-            .batched(
-                self.batch_settings
-                    .into_item_size_config(StatsdBatchSizer::default()),
-            )
+            .batched(self.batch_settings.into_item_size_config(StatsdBatchSizer))
             // We build our requests "incrementally", which means that for a single batch of
             // metrics, we might generate N requests to represent all of the metrics in the batch.
             //

src/sources/mod.rs

+3-3
@@ -13,7 +13,7 @@ pub mod aws_kinesis_firehose;
 pub mod aws_s3;
 #[cfg(feature = "sources-aws_sqs")]
 pub mod aws_sqs;
-#[cfg(any(feature = "sources-datadog_agent"))]
+#[cfg(feature = "sources-datadog_agent")]
 pub mod datadog_agent;
 #[cfg(feature = "sources-demo_logs")]
 pub mod demo_logs;
@@ -54,11 +54,11 @@ pub mod journald;
 pub mod kafka;
 #[cfg(feature = "sources-kubernetes_logs")]
 pub mod kubernetes_logs;
-#[cfg(all(feature = "sources-logstash"))]
+#[cfg(feature = "sources-logstash")]
 pub mod logstash;
 #[cfg(feature = "sources-mongodb_metrics")]
 pub mod mongodb_metrics;
-#[cfg(all(feature = "sources-nats"))]
+#[cfg(feature = "sources-nats")]
 pub mod nats;
 #[cfg(feature = "sources-nginx_metrics")]
 pub mod nginx_metrics;

src/sources/util/grpc/mod.rs

+1-1
@@ -48,7 +48,7 @@ where
         // use independent `tower` layers when the request body itself (the body type, not the actual bytes) must be
        // modified or wrapped.. so instead of a cleaner design, we're opting here to bake it all together until the
         // crates are sufficiently flexible for us to craft a better design.
-        .layer(DecompressionAndMetricsLayer::default())
+        .layer(DecompressionAndMetricsLayer)
         .add_service(service)
         .serve_with_incoming_shutdown(stream, shutdown.map(|token| tx.send(token).unwrap()))
         .in_current_span()

src/sources/util/mod.rs

+2-2
@@ -1,5 +1,5 @@
 #![allow(missing_docs)]
-#[cfg(any(feature = "sources-http_server"))]
+#[cfg(feature = "sources-http_server")]
 mod body_decoding;
 mod encoding_config;
 #[cfg(all(unix, feature = "sources-dnstap"))]
@@ -46,7 +46,7 @@ pub use unix_datagram::build_unix_datagram_source;
 pub use unix_stream::build_unix_stream_source;
 pub use wrappers::{AfterRead, AfterReadExt};
 
-#[cfg(any(feature = "sources-http_server"))]
+#[cfg(feature = "sources-http_server")]
 pub use self::body_decoding::Encoding;
 #[cfg(feature = "sources-utils-http-query")]
 pub use self::http::add_query_parameters;
