@@ -2828,13 +2828,9 @@ static int tcp_try_undo_loss(struct sock *sk)
 static inline void tcp_complete_cwr(struct sock *sk)
 {
         struct tcp_sock *tp = tcp_sk(sk);
-
-        /* Do not moderate cwnd if it's already undone in cwr or recovery. */
-        if (tp->undo_marker) {
-                if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
-                        tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-                else /* PRR */
-                        tp->snd_cwnd = tp->snd_ssthresh;
+        /* Do not moderate cwnd if it's already undone in cwr or recovery */
+        if (tp->undo_marker && tp->snd_cwnd > tp->snd_ssthresh) {
+                tp->snd_cwnd = tp->snd_ssthresh;
                 tp->snd_cwnd_stamp = tcp_time_stamp;
         }
         tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
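
Note on the restored behavior: the pre-PRR exit path only moderates cwnd downward, and only while it still exceeds ssthresh. A minimal user-space sketch of that rule (the struct, field names, and sample values are illustrative stand-ins for tcp_sock state, not kernel code):

```c
#include <stdio.h>

/* Simplified stand-in for the relevant tcp_sock fields. */
struct flow {
        unsigned int snd_cwnd;
        unsigned int snd_ssthresh;
        int undo_marker;        /* nonzero while loss recovery is undoable */
};

/* Restored (pre-PRR) rule: clamp cwnd to ssthresh on leaving CWR or
 * recovery, but leave a cwnd already at or below ssthresh alone.
 */
static void complete_cwr_classic(struct flow *f)
{
        if (f->undo_marker && f->snd_cwnd > f->snd_ssthresh)
                f->snd_cwnd = f->snd_ssthresh;
}

int main(void)
{
        struct flow f = { .snd_cwnd = 20, .snd_ssthresh = 10, .undo_marker = 1 };

        complete_cwr_classic(&f);
        printf("cwnd after recovery: %u\n", f.snd_cwnd);        /* 10 */

        /* A flow that is already below ssthresh is untouched. */
        f.snd_cwnd = 6;
        complete_cwr_classic(&f);
        printf("cwnd after recovery: %u\n", f.snd_cwnd);        /* 6 */
        return 0;
}
```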
@@ -2952,38 +2948,6 @@ void tcp_simple_retransmit(struct sock *sk)
 }
 EXPORT_SYMBOL(tcp_simple_retransmit);
 
-/* This function implements the PRR algorithm, specifically the PRR-SSRB
- * (proportional rate reduction with slow start reduction bound) as described in
- * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
- * It computes the number of packets to send (sndcnt) based on packets newly
- * delivered:
- *   1) If the packets in flight is larger than ssthresh, PRR spreads the
- *      cwnd reductions across a full RTT.
- *   2) If packets in flight is lower than ssthresh (such as due to excess
- *      losses and/or application stalls), do not perform any further cwnd
- *      reductions, but instead slow start up to ssthresh.
- */
-static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
-                                        int fast_rexmit, int flag)
-{
-        struct tcp_sock *tp = tcp_sk(sk);
-        int sndcnt = 0;
-        int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
-
-        if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
-                u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
-                               tp->prior_cwnd - 1;
-                sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
-        } else {
-                sndcnt = min_t(int, delta,
-                               max_t(int, tp->prr_delivered - tp->prr_out,
-                                     newly_acked_sacked) + 1);
-        }
-
-        sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
-        tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
-}
-
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
  * taking into account both packets sitting in receiver's buffer and
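
The removed tcp_update_cwnd_in_recovery() is self-contained arithmetic, so it can be exercised outside the kernel. Below is a hedged stand-alone sketch of the same PRR-SSRB computation, with div_u64/min_t/max_t replaced by plain C equivalents and an invented test harness (the field names mirror the kernel's, but the values are examples only):

```c
#include <stdio.h>
#include <stdint.h>

/* User-space stand-in for the tcp_sock fields PRR reads. */
struct prr_state {
        unsigned int snd_ssthresh;
        unsigned int prior_cwnd;        /* cwnd when recovery started */
        unsigned int prr_delivered;     /* packets delivered since then */
        unsigned int prr_out;           /* packets sent since then */
        unsigned int in_flight;         /* tcp_packets_in_flight(tp) */
};

static int max_int(int a, int b) { return a > b ? a : b; }
static int min_int(int a, int b) { return a < b ? a : b; }

/* Same arithmetic as the removed tcp_update_cwnd_in_recovery(). */
static unsigned int prr_cwnd(const struct prr_state *s,
                             int newly_acked_sacked, int fast_rexmit)
{
        int sndcnt;
        int delta = (int)s->snd_ssthresh - (int)s->in_flight;

        if (s->in_flight > s->snd_ssthresh) {
                /* Proportional phase: pace the reduction over one RTT. */
                uint64_t dividend = (uint64_t)s->snd_ssthresh * s->prr_delivered +
                                    s->prior_cwnd - 1;
                sndcnt = (int)(dividend / s->prior_cwnd) - (int)s->prr_out;
        } else {
                /* Slow-start reduction bound: grow back toward ssthresh. */
                sndcnt = min_int(delta,
                                 max_int((int)s->prr_delivered - (int)s->prr_out,
                                         newly_acked_sacked) + 1);
        }
        /* A fast retransmit is always allowed to go out. */
        sndcnt = max_int(sndcnt, fast_rexmit ? 1 : 0);
        return s->in_flight + sndcnt;
}

int main(void)
{
        /* prior_cwnd 20, ssthresh 10: halfway through the reduction,
         * 10 packets delivered, 5 sent, 15 still in flight.
         */
        struct prr_state s = { .snd_ssthresh = 10, .prior_cwnd = 20,
                               .prr_delivered = 10, .prr_out = 5,
                               .in_flight = 15 };

        /* dividend = 10*10 + 19 = 119; 119/20 = 5; sndcnt = 5 - 5 = 0 */
        printf("cwnd = %u\n", prr_cwnd(&s, 2, 0));      /* 15 */
        return 0;
}
```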
@@ -2995,8 +2959,7 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
  * It does _not_ decide what to send, it is made in function
  * tcp_xmit_retransmit_queue().
  */
-static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
-                                  int newly_acked_sacked, int flag)
+static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 {
         struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
@@ -3146,17 +3109,13 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 
                 tp->bytes_acked = 0;
                 tp->snd_cwnd_cnt = 0;
-                tp->prior_cwnd = tp->snd_cwnd;
-                tp->prr_delivered = 0;
-                tp->prr_out = 0;
                 tcp_set_ca_state(sk, TCP_CA_Recovery);
                 fast_rexmit = 1;
         }
 
         if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
                 tcp_update_scoreboard(sk, fast_rexmit);
-        tp->prr_delivered += newly_acked_sacked;
-        tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag);
+        tcp_cwnd_down(sk, flag);
         tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3671,8 +3630,6 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
         u32 prior_in_flight;
         u32 prior_fackets;
         int prior_packets;
-        int prior_sacked = tp->sacked_out;
-        int newly_acked_sacked = 0;
         int frto_cwnd = 0;
 
         /* If the ack is older than previous acks
@@ -3744,9 +3701,6 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
         /* See if we can take anything off of the retransmit queue. */
         flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
-        newly_acked_sacked = (prior_packets - prior_sacked) -
-                             (tp->packets_out - tp->sacked_out);
-
         if (tp->frto_counter)
                 frto_cwnd = tcp_process_frto(sk, flag);
         /* Guarantee sacktag reordering detection against wrap-arounds */
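
For context, the deleted lines computed the per-ACK delivery count that fed PRR: packets newly cumulatively ACKed plus packets newly SACKed. A small worked example of that arithmetic (the variable names follow the diff; the numbers are invented):

```c
#include <stdio.h>

/* The removed computation, lifted out of tcp_ack(): snapshot the
 * counters before processing the ACK, then take the difference after.
 */
int main(void)
{
        unsigned int prior_packets = 30, prior_sacked = 4;   /* before the ACK */
        unsigned int packets_out = 25, sacked_out = 6;       /* after the ACK  */

        int newly_acked_sacked = (int)(prior_packets - prior_sacked) -
                                 (int)(packets_out - sacked_out);

        /* (30 - 4) - (25 - 6) = 26 - 19 = 7:
         * 5 packets cumulatively ACKed plus 2 newly SACKed.
         */
        printf("newly_acked_sacked = %d\n", newly_acked_sacked);
        return 0;
}
```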
@@ -3759,7 +3713,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                     tcp_may_raise_cwnd(sk, flag))
                         tcp_cong_avoid(sk, ack, prior_in_flight);
                 tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
-                                      newly_acked_sacked, flag);
+                                      flag);
         } else {
                 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
                         tcp_cong_avoid(sk, ack, prior_in_flight);