Rust 1.85 lints #7019

Merged: 5 commits, Feb 24, 2025
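
This PR updates Lighthouse for the lints that fire on Rust 1.85. Most of the diff replaces Option::map_or(true, ..) with the more direct Option::is_none_or(..), presumably driven by clippy's unnecessary_map_or lint; deletes .map(Into::into) and .map_err(Into::into) calls that had become identity conversions (clippy::useless_conversion); and picks up the rustfmt re-wrapping that follows from those deletions. It also switches the CI workflow to a dedicated LIGHTHOUSE_GITHUB_TOKEN secret. A minimal sketch of the dominant rewrite, using a hypothetical helper rather than code from this PR:

// map_or(true, f) on an Option asks "is this None, or does the value satisfy f?",
// which Option::is_none_or (stable since Rust 1.82) expresses directly.
fn should_update(existing: Option<u64>, candidate: u64) -> bool {
    // Before: existing.map_or(true, |prev| candidate > prev)
    existing.is_none_or(|prev| candidate > prev)
}

The same rewrite recurs throughout the diff below, from block_times_cache.rs to the HTTP API query filters and the slasher database.
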
4 changes: 3 additions & 1 deletion .github/workflows/test-suite.yml
@@ -19,7 +19,9 @@ env:
# Disable debug info (see https://github.com/sigp/lighthouse/issues/4005)
RUSTFLAGS: "-D warnings -C debuginfo=0"
# Prevent Github API rate limiting.
LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# NOTE: this token is a personal access token on Jimmy's account due to the default GITHUB_TOKEN
# not having access to other repositories. We should eventually devise a better solution here.
LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.LIGHTHOUSE_GITHUB_TOKEN }}
# Enable self-hosted runners for the sigp repo only.
SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }}
# Self-hosted runners need to reference a different host for `./watch` tests.

28 changes: 13 additions & 15 deletions beacon_node/beacon_chain/src/attestation_verification.rs
@@ -1450,19 +1450,17 @@ where
return Err(Error::UnknownTargetRoot(target.root));
}

chain
.with_committee_cache(target.root, attestation_epoch, |committee_cache, _| {
let committees_per_slot = committee_cache.committees_per_slot();

Ok(committee_cache
.get_beacon_committees_at_slot(attestation.data().slot)
.map(|committees| map_fn((committees, committees_per_slot)))
.unwrap_or_else(|_| {
Err(Error::NoCommitteeForSlotAndIndex {
slot: attestation.data().slot,
index: attestation.committee_index().unwrap_or(0),
})
}))
})
.map_err(BeaconChainError::from)?
chain.with_committee_cache(target.root, attestation_epoch, |committee_cache, _| {
let committees_per_slot = committee_cache.committees_per_slot();

Ok(committee_cache
.get_beacon_committees_at_slot(attestation.data().slot)
.map(|committees| map_fn((committees, committees_per_slot)))
.unwrap_or_else(|_| {
Err(Error::NoCommitteeForSlotAndIndex {
slot: attestation.data().slot,
index: attestation.committee_index().unwrap_or(0),
})
}))
})?
}
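
Beyond the rustfmt re-wrapping, the change above drops the trailing .map_err(BeaconChainError::from) before the ?. As a general Rust point, ? already applies a From conversion into the calling function's error type, so an explicit conversion right before it adds nothing, and when the source and target types coincide it is exactly what clippy::useless_conversion reports. A hedged sketch with hypothetical error types, not Lighthouse's:

#[derive(Debug)]
struct InnerError;

#[derive(Debug)]
struct OuterError;

impl From<InnerError> for OuterError {
    fn from(_: InnerError) -> Self {
        OuterError
    }
}

fn fallible() -> Result<u8, InnerError> {
    Err(InnerError)
}

fn caller() -> Result<u8, OuterError> {
    // Equivalent to fallible().map_err(OuterError::from)?: the ? operator
    // inserts the From<InnerError> -> OuterError conversion on its own.
    let value = fallible()?;
    Ok(value + 1)
}
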
6 changes: 3 additions & 3 deletions beacon_node/beacon_chain/src/beacon_chain.rs
@@ -6506,9 +6506,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

/// Returns `true` if the given slot is prior to the `bellatrix_fork_epoch`.
pub fn slot_is_prior_to_bellatrix(&self, slot: Slot) -> bool {
self.spec.bellatrix_fork_epoch.map_or(true, |bellatrix| {
slot.epoch(T::EthSpec::slots_per_epoch()) < bellatrix
})
self.spec
.bellatrix_fork_epoch
.is_none_or(|bellatrix| slot.epoch(T::EthSpec::slots_per_epoch()) < bellatrix)
}

/// Returns the value of `execution_optimistic` for `block`.

4 changes: 2 additions & 2 deletions beacon_node/beacon_chain/src/block_times_cache.rs
@@ -173,7 +173,7 @@ impl BlockTimesCache {
if block_times
.timestamps
.all_blobs_observed
.map_or(true, |prev| timestamp > prev)
.is_none_or(|prev| timestamp > prev)
{
block_times.timestamps.all_blobs_observed = Some(timestamp);
}

@@ -195,7 +195,7 @@ impl BlockTimesCache {
.entry(block_root)
.or_insert_with(|| BlockTimesCacheValue::new(slot));
let existing_timestamp = field(&mut block_times.timestamps);
if existing_timestamp.map_or(true, |prev| timestamp < prev) {
if existing_timestamp.is_none_or(|prev| timestamp < prev) {
*existing_timestamp = Some(timestamp);
}
}

@@ -307,7 +307,6 @@ impl<E: EthSpec> PendingComponents<E> {
.map(|b| b.map(|b| b.to_blob()))
.take(num_blobs_expected)
.collect::<Option<Vec<_>>>()
.map(Into::into)
else {
return Err(AvailabilityCheckError::Unexpected);
};

2 changes: 1 addition & 1 deletion beacon_node/beacon_chain/src/shuffling_cache.rs
@@ -138,7 +138,7 @@ impl ShufflingCache {
.get(&key)
// Replace the committee if it's not present or if it's a promise. A bird in the hand is
// worth two in the promise-bush!
.map_or(true, CacheItem::is_promise)
.is_none_or(CacheItem::is_promise)
{
self.insert_cache_item(
key,

2 changes: 1 addition & 1 deletion beacon_node/beacon_chain/src/validator_monitor.rs
@@ -628,7 +628,7 @@ impl<E: EthSpec> ValidatorMonitor<E> {
// the proposer shuffling cache lock when there are lots of missed blocks.
if proposers_per_epoch
.as_ref()
.map_or(true, |(_, cached_epoch)| *cached_epoch != slot_epoch)
.is_none_or(|(_, cached_epoch)| *cached_epoch != slot_epoch)
{
proposers_per_epoch = self
.get_proposers_by_epoch_from_cache(

2 changes: 1 addition & 1 deletion beacon_node/client/src/notifier.rs
@@ -187,7 +187,7 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
let is_backfilling = matches!(current_sync_state, SyncState::BackFillSyncing { .. });
if is_backfilling
&& last_backfill_log_slot
.map_or(true, |slot| slot + BACKFILL_LOG_INTERVAL <= current_slot)
.is_none_or(|slot| slot + BACKFILL_LOG_INTERVAL <= current_slot)
{
last_backfill_log_slot = Some(current_slot);


@@ -448,7 +448,7 @@ impl<E: EthSpec> ExecutionBlockGenerator<E> {
if self
.head_block
.as_ref()
.map_or(true, |head| head.block_hash() == last_block_hash)
.is_none_or(|head| head.block_hash() == last_block_hash)
{
self.head_block = Some(block.clone());
}

2 changes: 1 addition & 1 deletion beacon_node/genesis/src/eth1_genesis_service.rs
@@ -263,7 +263,7 @@ impl Eth1GenesisService {
// again later.
if eth1_service
.highest_safe_block()
.map_or(true, |n| block.number > n)
.is_none_or(|n| block.number > n)
{
continue;
}

8 changes: 4 additions & 4 deletions beacon_node/http_api/src/lib.rs
@@ -1939,10 +1939,10 @@ pub fn serve<T: BeaconChainTypes>(
query: api_types::AttestationPoolQuery| {
task_spawner.blocking_response_task(Priority::P1, move || {
let query_filter = |data: &AttestationData| {
query.slot.map_or(true, |slot| slot == data.slot)
query.slot.is_none_or(|slot| slot == data.slot)
&& query
.committee_index
.map_or(true, |index| index == data.index)
.is_none_or(|index| index == data.index)
};

let mut attestations = chain.op_pool.get_filtered_attestations(query_filter);

@@ -3159,11 +3159,11 @@ pub fn serve<T: BeaconChainTypes>(
peer_info.connection_status(),
);

let state_matches = query.state.as_ref().map_or(true, |states| {
let state_matches = query.state.as_ref().is_none_or(|states| {
states.iter().any(|state_param| *state_param == state)
});
let direction_matches =
query.direction.as_ref().map_or(true, |directions| {
query.direction.as_ref().is_none_or(|directions| {
directions.iter().any(|dir_param| *dir_param == direction)
});
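
The HTTP API changes apply the same rewrite to optional query filters: an absent parameter matches everything, which is_none_or states without the map_or(true, ..) detour. A small hypothetical sketch of that filtering style, not the actual handler code:

fn matches(filter_slot: Option<u64>, filter_index: Option<u64>, slot: u64, index: u64) -> bool {
    // An absent filter matches every value; a present filter must agree with it.
    filter_slot.is_none_or(|s| s == slot) && filter_index.is_none_or(|i| i == index)
}

fn main() {
    assert!(matches(None, Some(3), 42, 3));
    assert!(!matches(Some(41), None, 42, 3));
}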


4 changes: 2 additions & 2 deletions beacon_node/http_api/src/produce_block.rs
@@ -147,7 +147,7 @@ pub async fn produce_blinded_block_v2<T: BeaconChainTypes>(
.produce_block_with_verification(
randao_reveal,
slot,
query.graffiti.map(Into::into),
query.graffiti,
randao_verification,
None,
BlockProductionVersion::BlindedV2,

@@ -178,7 +178,7 @@ pub async fn produce_block_v2<T: BeaconChainTypes>(
.produce_block_with_verification(
randao_reveal,
slot,
query.graffiti.map(Into::into),
query.graffiti,
randao_verification,
None,
BlockProductionVersion::FullV2,

6 changes: 3 additions & 3 deletions beacon_node/http_api/src/validators.rs
@@ -29,7 +29,7 @@ pub fn get_beacon_state_validators<T: BeaconChainTypes>(
.enumerate()
// filter by validator id(s) if provided
.filter(|(index, (validator, _))| {
ids_filter_set.as_ref().map_or(true, |ids_set| {
ids_filter_set.as_ref().is_none_or(|ids_set| {
ids_set.contains(&ValidatorId::PublicKey(validator.pubkey))
|| ids_set.contains(&ValidatorId::Index(*index as u64))
})

@@ -42,7 +42,7 @@
far_future_epoch,
);

let status_matches = query_statuses.as_ref().map_or(true, |statuses| {
let status_matches = query_statuses.as_ref().is_none_or(|statuses| {
statuses.contains(&status)
|| statuses.contains(&status.superstatus())
});

@@ -92,7 +92,7 @@ pub fn get_beacon_state_validator_balances<T: BeaconChainTypes>(
.enumerate()
// filter by validator id(s) if provided
.filter(|(index, (validator, _))| {
ids_filter_set.as_ref().map_or(true, |ids_set| {
ids_filter_set.as_ref().is_none_or(|ids_set| {
ids_set.contains(&ValidatorId::PublicKey(validator.pubkey))
|| ids_set.contains(&ValidatorId::Index(*index as u64))
})

4 changes: 2 additions & 2 deletions beacon_node/http_api/tests/tests.rs
@@ -2450,8 +2450,8 @@ impl ApiTester {
};

let state_match =
states.map_or(true, |states| states.contains(&PeerState::Connected));
let dir_match = dirs.map_or(true, |dirs| dirs.contains(&PeerDirection::Inbound));
states.is_none_or(|states| states.contains(&PeerState::Connected));
let dir_match = dirs.is_none_or(|dirs| dirs.contains(&PeerDirection::Inbound));

let mut expected_peers = Vec::new();
if state_match && dir_match {

@@ -201,7 +201,7 @@ impl<E: EthSpec> NetworkBehaviour for PeerManager<E> {
.peers
.read()
.peer_info(&peer_id)
.map_or(true, |peer| !peer.has_future_duty())
.is_none_or(|peer| !peer.has_future_duty())
{
return Err(ConnectionDenied::new(
"Connection to peer rejected: too many connections",

@@ -240,7 +240,7 @@ impl<E: EthSpec> NetworkBehaviour for PeerManager<E> {
.peers
.read()
.peer_info(&peer_id)
.map_or(true, |peer| !peer.has_future_duty())
.is_none_or(|peer| !peer.has_future_duty())
{
return Err(ConnectionDenied::new(
"Connection to peer rejected: too many connections",

4 changes: 1 addition & 3 deletions beacon_node/network/src/network_beacon_processor/mod.rs
@@ -79,9 +79,7 @@ const BLOB_PUBLICATION_EXP_FACTOR: usize = 2;

impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
fn try_send(&self, event: BeaconWorkEvent<T::EthSpec>) -> Result<(), Error<T::EthSpec>> {
self.beacon_processor_send
.try_send(event)
.map_err(Into::into)
self.beacon_processor_send.try_send(event)
}

/// Create a new `Work` event for some `SingleAttestation`.
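
The try_send change is representative of several deletions in this PR: once an error or value already has the required type, a trailing .map_err(Into::into) or .map(Into::into) converts it into itself, which the useless_conversion lint reports under this toolchain. A minimal, hypothetical sketch of the same shape using a standard-library channel:

use std::sync::mpsc::{channel, SendError, Sender};

fn forward(tx: &Sender<u32>, value: u32) -> Result<(), SendError<u32>> {
    // The error type already matches this function's error type, so a trailing
    // .map_err(Into::into) would be an identity conversion and is simply dropped.
    tx.send(value)
}

fn main() {
    let (tx, rx) = channel();
    forward(&tx, 7).expect("send failed");
    assert_eq!(rx.recv().unwrap(), 7);
}
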

2 changes: 1 addition & 1 deletion beacon_node/operation_pool/src/bls_to_execution_changes.rs
@@ -112,7 +112,7 @@ impl<E: EthSpec> BlsToExecutionChanges<E> {
head_state
.validators()
.get(validator_index as usize)
.map_or(true, |validator| {
.is_none_or(|validator| {
let prune = validator.has_execution_withdrawal_credential(spec)
&& head_block
.message()

2 changes: 1 addition & 1 deletion beacon_node/operation_pool/src/lib.rs
@@ -767,7 +767,7 @@ fn prune_validator_hash_map<T, F, E: EthSpec>(
&& head_state
.validators()
.get(validator_index as usize)
.map_or(true, |validator| !prune_if(validator_index, validator))
.is_none_or(|validator| !prune_if(validator_index, validator))
});
}


2 changes: 1 addition & 1 deletion beacon_node/operation_pool/src/reward_cache.rs
@@ -83,7 +83,7 @@ impl RewardCache {
if self
.initialization
.as_ref()
.map_or(true, |init| *init != new_init)
.is_none_or(|init| *init != new_init)
{
self.update_previous_epoch_participation(state)
.map_err(OpPoolError::RewardCacheUpdatePrevEpoch)?;

4 changes: 2 additions & 2 deletions beacon_node/store/src/hot_cold_store.rs
@@ -903,7 +903,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
state_root: &Hash256,
summary: HotStateSummary,
) -> Result<(), Error> {
self.hot_db.put(state_root, &summary).map_err(Into::into)
self.hot_db.put(state_root, &summary)
}

/// Store a state in the store.

@@ -1248,7 +1248,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
state_root.as_slice().to_vec(),
));

if slot.map_or(true, |slot| slot % E::slots_per_epoch() == 0) {
if slot.is_none_or(|slot| slot % E::slots_per_epoch() == 0) {
key_value_batch.push(KeyValueStoreOp::DeleteKey(
DBColumn::BeaconState,
state_root.as_slice().to_vec(),

2 changes: 0 additions & 2 deletions beacon_node/store/src/lib.rs
@@ -195,15 +195,13 @@ pub trait ItemStore<E: EthSpec>: KeyValueStore<E> + Sync + Send + Sized + 'stati
let key = key.as_slice();

self.put_bytes(column, key, &item.as_store_bytes())
.map_err(Into::into)
}

fn put_sync<I: StoreItem>(&self, key: &Hash256, item: &I) -> Result<(), Error> {
let column = I::db_column();
let key = key.as_slice();

self.put_bytes_sync(column, key, &item.as_store_bytes())
.map_err(Into::into)
}

/// Retrieve an item from `Self`.

1 change: 0 additions & 1 deletion common/account_utils/src/validator_definitions.rs
@@ -115,7 +115,6 @@ impl SigningDefinition {
voting_keystore_password_path: Some(path),
..
} => read_password_string(path)
.map(Into::into)
.map(Option::Some)
.map_err(Error::UnableToReadKeystorePassword),
SigningDefinition::LocalKeystore { .. } => Err(Error::KeystoreWithoutPassword),

@@ -293,7 +293,6 @@ where
)?);
Ok(())
})
.map_err(Error::into)
}

/// Includes all signatures in `self.block.body.voluntary_exits` for verification.

1 change: 1 addition & 0 deletions consensus/types/src/beacon_block_body.rs
@@ -971,6 +971,7 @@ impl<E: EthSpec> From<BeaconBlockBody<E, FullPayload<E>>>
Option<ExecutionPayload<E>>,
)
{
#[allow(clippy::useless_conversion)] // Not a useless conversion
fn from(body: BeaconBlockBody<E, FullPayload<E>>) -> Self {
map_beacon_block_body!(body, |inner, cons| {
let (block, payload) = inner.into();
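
This is the one place where the new lint is suppressed rather than followed: the conversion comes out of the map_beacon_block_body! macro and, per the inline comment, is not actually useless, so the allow is scoped to this single From implementation. A hypothetical sketch of the mechanism, deliberately not the Lighthouse macro:

// Scoping the allow to one item keeps clippy::useless_conversion active for the
// rest of the crate while silencing a case the author judges to be a false positive.
#[allow(clippy::useless_conversion)]
fn roundtrip(bytes: Vec<u8>) -> Vec<u8> {
    // Converting Vec<u8> into Vec<u8> is the kind of expression the lint reports;
    // the attribute above suppresses it for this function only.
    bytes.into()
}
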

10 changes: 5 additions & 5 deletions consensus/types/src/runtime_var_list.rs
@@ -134,13 +134,13 @@ impl<T: Decode> RuntimeVariableList<T> {
)));
}

bytes
.chunks(<T as Decode>::ssz_fixed_len())
.try_fold(Vec::with_capacity(num_items), |mut vec, chunk| {
bytes.chunks(<T as Decode>::ssz_fixed_len()).try_fold(
Vec::with_capacity(num_items),
|mut vec, chunk| {
vec.push(<T as Decode>::from_ssz_bytes(chunk)?);
Ok(vec)
})
.map(Into::into)?
},
)?
} else {
ssz::decode_list_of_variable_length_items(bytes, Some(max_len))?
};
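
Here the trailing .map(Into::into)? goes away, presumably because the fold already yields the Vec the surrounding code needs, and rustfmt then re-wraps the try_fold call. A hedged, self-contained sketch of the same decoding shape, using plain little-endian u16 chunks instead of the SSZ Decode trait:

fn decode_u16_list(bytes: &[u8]) -> Result<Vec<u16>, String> {
    if bytes.len() % 2 != 0 {
        return Err("length is not a multiple of the fixed item size".to_string());
    }
    bytes.chunks(2).try_fold(
        Vec::with_capacity(bytes.len() / 2),
        |mut vec, chunk| {
            let array: [u8; 2] = chunk.try_into().expect("chunk is exactly 2 bytes");
            vec.push(u16::from_le_bytes(array));
            Ok(vec)
        },
    )
}
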

2 changes: 1 addition & 1 deletion slasher/src/database.rs
@@ -665,7 +665,7 @@ impl<E: EthSpec> SlasherDB<E> {
target: Epoch,
prev_max_target: Option<Epoch>,
) -> Result<Option<CompactAttesterRecord>, Error> {
if prev_max_target.map_or(true, |prev_max| target > prev_max) {
if prev_max_target.is_none_or(|prev_max| target > prev_max) {
return Ok(None);
}
