Skip to content

Commit 5083095

Browse files
committed
Clippy
1 parent b8291e3 commit 5083095

File tree

5 files changed

+33
-23
lines changed

5 files changed

+33
-23
lines changed

mistralrs-core/src/vision_models/conformer/encoder.rs

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
#![allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)]
2+
13
use std::sync::Arc;
24

35
use candle_core::{IndexOp, Result, Tensor, D};
@@ -340,11 +342,7 @@ impl ConvModule {
340342

341343
let fix_len1;
342344
let ext_pw_conv_1d = if cfg.causal {
343-
if cfg.ext_pw_kernel_size > 1 {
344-
fix_len1 = true;
345-
} else {
346-
fix_len1 = false;
347-
}
345+
fix_len1 = cfg.ext_pw_kernel_size > 1;
348346
layers::conv1d(
349347
cfg.attention_dim,
350348
cfg.ext_pw_out_channel,
@@ -383,7 +381,7 @@ impl ConvModule {
383381
None
384382
};
385383

386-
assert_eq!(cfg.linear_glu_in_convm, false);
384+
assert!(!cfg.linear_glu_in_convm);
387385
let glu = GLUPointWiseConv::new(cfg, vb.pp("glu"))?;
388386

389387
Ok(Self {

mistralrs-core/src/vision_models/conformer/nemo.rs

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
#![allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)]
2+
13
use std::sync::Arc;
24

35
use candle_core::{Result, Tensor};
@@ -29,7 +31,7 @@ impl NemoConvSubsampling {
2931
let kernel_size = 3;
3032
let ceil_mode = false;
3133

32-
assert_eq!(cfg.is_causal, false);
34+
assert!(!cfg.is_causal);
3335
assert_eq!(cfg.subsampling, "dw_striding");
3436

3537
let left_padding = (kernel_size - 1) / 2;
@@ -50,7 +52,7 @@ impl NemoConvSubsampling {
5052
kernel_size,
5153
Conv2dConfig {
5254
padding: left_padding,
53-
stride: stride,
55+
stride,
5456
dilation: 1,
5557
groups: 1,
5658
},
@@ -69,7 +71,7 @@ impl NemoConvSubsampling {
6971
kernel_size,
7072
Conv2dConfig {
7173
padding: left_padding,
72-
stride: stride,
74+
stride,
7375
dilation: 1,
7476
groups: in_channels,
7577
},

mistralrs-core/src/vision_models/conformer/pos_embed.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
#![allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)]
2+
13
use candle_core::{DType, Device, IndexOp, Result, Tensor, D};
24
use candle_nn::{Embedding, Module};
35
use mistralrs_quant::ShardedVarBuilder;

mistralrs-core/src/vision_models/phi4/inputs_processor.rs

Lines changed: 21 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,8 @@ pub(crate) const DYHD_BASE_RESOLUTION: usize = 448;
4646

4747
const AUDIO_FEATURE_SIZE: usize = 80; // mel bins
4848

49+
type AudioProcessingResult = Result<(Option<Tensor>, Option<Vec<usize>>, Option<Tensor>)>;
50+
4951
// Input processor
5052
pub struct Phi4MMInputsProcessor {
5153
audio_compression_rate: usize,
@@ -648,16 +650,26 @@ impl Phi4MMInputsProcessor {
648650
let right_bin = bin_centers[m + 2];
649651

650652
// Left slope
651-
for bin in left_bin..center_bin {
653+
for (bin, filter) in filter
654+
.iter_mut()
655+
.enumerate()
656+
.take(center_bin)
657+
.skip(left_bin)
658+
{
652659
if bin < bank_width {
653-
filter[bin] = (bin - left_bin) as f32 / (center_bin - left_bin) as f32;
660+
*filter = (bin - left_bin) as f32 / (center_bin - left_bin) as f32;
654661
}
655662
}
656663

657664
// Right slope
658-
for bin in center_bin..right_bin {
665+
for (bin, filter) in filter
666+
.iter_mut()
667+
.enumerate()
668+
.take(right_bin)
669+
.skip(center_bin)
670+
{
659671
if bin < bank_width {
660-
filter[bin] = (right_bin - bin) as f32 / (right_bin - center_bin) as f32;
672+
*filter = (right_bin - bin) as f32 / (right_bin - center_bin) as f32;
661673
}
662674
}
663675

@@ -710,13 +722,11 @@ impl Phi4MMInputsProcessor {
710722
&self,
711723
input_seqs: &[&mut Sequence],
712724
device: &Device,
713-
) -> Result<(Option<Tensor>, Option<Vec<usize>>, Option<Tensor>)> {
725+
) -> AudioProcessingResult {
714726
// Check if any sequence has audio tokens
715-
let has_audio_tokens = input_seqs.iter().any(|seq| {
716-
seq.get_toks()
717-
.iter()
718-
.any(|&token| token == AUDIO_SPECIAL_TOKEN_ID as u32)
719-
});
727+
let has_audio_tokens = input_seqs
728+
.iter()
729+
.any(|seq| seq.get_toks().contains(&(AUDIO_SPECIAL_TOKEN_ID as u32)));
720730

721731
if !has_audio_tokens {
722732
return Ok((None, None, None));
@@ -728,10 +738,7 @@ impl Phi4MMInputsProcessor {
728738

729739
// Process audio for each sequence that needs it
730740
for seq in input_seqs.iter() {
731-
let has_audio = seq
732-
.get_toks()
733-
.iter()
734-
.any(|&token| token == AUDIO_SPECIAL_TOKEN_ID as u32);
741+
let has_audio = seq.get_toks().contains(&(AUDIO_SPECIAL_TOKEN_ID as u32));
735742

736743
if has_audio {
737744
// Load dummy audio (TODO: make this per-sequence)

mistralrs-core/src/vision_models/phi4/mm_embedding.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,7 @@ impl Phi4MMImageAudioEmbedding {
5656
})
5757
}
5858

59+
#[allow(clippy::too_many_arguments)]
5960
pub fn forward(
6061
&self,
6162
input_ids: &Tensor,

0 commit comments

Comments (0)