
Commit 0e5e535

Revert "Proportional Rate Reduction for TCP."
This reverts commit a5439dd.
1 parent 373d74c commit 0e5e535

File tree

include/linux/tcp.h
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c

3 files changed, 7 insertions(+), 62 deletions(-)

include/linux/tcp.h

Lines changed: 0 additions & 4 deletions
@@ -379,10 +379,6 @@ struct tcp_sock {
         u32     snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
         u32     snd_cwnd_used;
         u32     snd_cwnd_stamp;
-        u32     prior_cwnd;     /* Congestion window at start of Recovery. */
-        u32     prr_delivered;  /* Number of newly delivered packets to
-                                 * receiver in Recovery. */
-        u32     prr_out;        /* Total number of pkts sent during Recovery. */
 
         u32     rcv_wnd;        /* Current receiver window */
         u32     write_seq;      /* Tail(+1) of data held in tcp send buffer */

net/ipv4/tcp_input.c

Lines changed: 6 additions & 52 deletions
@@ -2828,13 +2828,9 @@ static int tcp_try_undo_loss(struct sock *sk)
 static inline void tcp_complete_cwr(struct sock *sk)
 {
         struct tcp_sock *tp = tcp_sk(sk);
-
-        /* Do not moderate cwnd if it's already undone in cwr or recovery. */
-        if (tp->undo_marker) {
-                if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
-                        tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-                else /* PRR */
-                        tp->snd_cwnd = tp->snd_ssthresh;
+        /* Do not moderate cwnd if it's already undone in cwr or recovery */
+        if (tp->undo_marker && tp->snd_cwnd > tp->snd_ssthresh) {
+                tp->snd_cwnd = tp->snd_ssthresh;
                 tp->snd_cwnd_stamp = tcp_time_stamp;
         }
         tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
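To make the behavioral change in the hunk above concrete: the PRR version forces cwnd to ssthresh (or min(cwnd, ssthresh) in CWR) whenever undo_marker is set, while the reverted version only clamps a cwnd that still sits above ssthresh. The following is a minimal standalone sketch of the two branches, with plain integers as hypothetical stand-ins for the tcp_sock fields; it illustrates the diff, it is not kernel code.

#include <stdio.h>

/* Pre-revert (PRR) behaviour: with undo_marker set, cwnd is forced to
 * ssthresh (or min(cwnd, ssthresh) while in the CWR state). */
static unsigned int complete_cwr_prr(unsigned int cwnd, unsigned int ssthresh,
                                     int undo_marker, int in_cwr)
{
        if (undo_marker) {
                if (in_cwr)
                        return cwnd < ssthresh ? cwnd : ssthresh;
                return ssthresh;        /* PRR: snap to ssthresh */
        }
        return cwnd;
}

/* Post-revert behaviour: cwnd is only pulled down when it is still above
 * ssthresh; a cwnd already at or below ssthresh is left alone. */
static unsigned int complete_cwr_reverted(unsigned int cwnd,
                                          unsigned int ssthresh,
                                          int undo_marker)
{
        if (undo_marker && cwnd > ssthresh)
                return ssthresh;
        return cwnd;
}

int main(void)
{
        /* cwnd below ssthresh at completion: PRR raises it to ssthresh,
         * the reverted code leaves it where it is. Prints 10 and 6. */
        printf("PRR: %u, reverted: %u\n",
               complete_cwr_prr(6, 10, 1, 0),
               complete_cwr_reverted(6, 10, 1));
        return 0;
}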
@@ -2952,38 +2948,6 @@ void tcp_simple_retransmit(struct sock *sk)
 }
 EXPORT_SYMBOL(tcp_simple_retransmit);
 
-/* This function implements the PRR algorithm, specifcally the PRR-SSRB
- * (proportional rate reduction with slow start reduction bound) as described in
- * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
- * It computes the number of packets to send (sndcnt) based on packets newly
- * delivered:
- *   1) If the packets in flight is larger than ssthresh, PRR spreads the
- *      cwnd reductions across a full RTT.
- *   2) If packets in flight is lower than ssthresh (such as due to excess
- *      losses and/or application stalls), do not perform any further cwnd
- *      reductions, but instead slow start up to ssthresh.
- */
-static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
-                                        int fast_rexmit, int flag)
-{
-        struct tcp_sock *tp = tcp_sk(sk);
-        int sndcnt = 0;
-        int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
-
-        if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
-                u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
-                               tp->prior_cwnd - 1;
-                sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
-        } else {
-                sndcnt = min_t(int, delta,
-                               max_t(int, tp->prr_delivered - tp->prr_out,
-                                     newly_acked_sacked) + 1);
-        }
-
-        sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
-        tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
-}
-
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
  * taking into account both packets sitting in receiver's buffer and
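The comment block deleted in the hunk above describes the PRR-SSRB sndcnt computation. As a reference, here is a hedged, self-contained sketch of that calculation, with plain integer parameters standing in for the tcp_sock fields and tcp_packets_in_flight(); it is an illustration of the removed logic under those assumptions, not the in-tree implementation.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the removed tcp_update_cwnd_in_recovery():
 * returns the new cwnd given plain-integer snapshots of the connection
 * state. Names mirror the kernel fields; the function is illustrative. */
static unsigned int prr_cwnd(unsigned int ssthresh, unsigned int in_flight,
                             unsigned int prior_cwnd, int prr_delivered,
                             int prr_out, int newly_acked_sacked,
                             int fast_rexmit)
{
        int sndcnt;
        int delta = (int)ssthresh - (int)in_flight;

        if (in_flight > ssthresh) {
                /* Case 1: still above ssthresh. Allow roughly
                 * ssthresh/prior_cwnd packets out per packet delivered,
                 * spreading the cwnd reduction over one RTT. */
                uint64_t dividend = (uint64_t)ssthresh * prr_delivered +
                                    prior_cwnd - 1;
                sndcnt = (int)(dividend / prior_cwnd) - prr_out;
        } else {
                /* Case 2: below ssthresh (heavy loss or application stall).
                 * Slow start back up toward ssthresh, bounded by what was
                 * newly delivered. */
                int bound = prr_delivered - prr_out;
                if (bound < newly_acked_sacked)
                        bound = newly_acked_sacked;
                sndcnt = delta < bound + 1 ? delta : bound + 1;
        }

        /* Always allow at least the fast retransmit itself. */
        if (sndcnt < (fast_rexmit ? 1 : 0))
                sndcnt = fast_rexmit ? 1 : 0;

        return in_flight + sndcnt;
}

int main(void)
{
        /* cwnd was 20 before loss, ssthresh halved to 10, 16 packets still
         * in flight, 4 newly delivered: PRR lets ~half of them (2) go out,
         * so the new cwnd is 16 + 2 = 18. */
        printf("new cwnd = %u\n", prr_cwnd(10, 16, 20, 4, 0, 4, 1));
        return 0;
}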
@@ -2995,8 +2959,7 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
  * It does _not_ decide what to send, it is made in function
  * tcp_xmit_retransmit_queue().
  */
-static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
-                                  int newly_acked_sacked, int flag)
+static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 {
         struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
@@ -3146,17 +3109,13 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 
                 tp->bytes_acked = 0;
                 tp->snd_cwnd_cnt = 0;
-                tp->prior_cwnd = tp->snd_cwnd;
-                tp->prr_delivered = 0;
-                tp->prr_out = 0;
                 tcp_set_ca_state(sk, TCP_CA_Recovery);
                 fast_rexmit = 1;
         }
 
         if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
                 tcp_update_scoreboard(sk, fast_rexmit);
-        tp->prr_delivered += newly_acked_sacked;
-        tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag);
+        tcp_cwnd_down(sk, flag);
         tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3671,8 +3630,6 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
         u32 prior_in_flight;
         u32 prior_fackets;
         int prior_packets;
-        int prior_sacked = tp->sacked_out;
-        int newly_acked_sacked = 0;
         int frto_cwnd = 0;
 
         /* If the ack is older than previous acks
@@ -3744,9 +3701,6 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
         /* See if we can take anything off of the retransmit queue. */
         flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
-        newly_acked_sacked = (prior_packets - prior_sacked) -
-                             (tp->packets_out - tp->sacked_out);
-
         if (tp->frto_counter)
                 frto_cwnd = tcp_process_frto(sk, flag);
         /* Guarantee sacktag reordering detection against wrap-arounds */
@@ -3759,7 +3713,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                     tcp_may_raise_cwnd(sk, flag))
                         tcp_cong_avoid(sk, ack, prior_in_flight);
                 tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
-                                      newly_acked_sacked, flag);
+                                      flag);
         } else {
                 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
                         tcp_cong_avoid(sk, ack, prior_in_flight);

net/ipv4/tcp_output.c

Lines changed: 1 addition & 6 deletions
@@ -1794,13 +1794,11 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                 tcp_event_new_data_sent(sk, skb);
 
                 tcp_minshall_update(tp, mss_now, skb);
-                sent_pkts += tcp_skb_pcount(skb);
+                sent_pkts++;
 
                 if (push_one)
                         break;
         }
-        if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
-                tp->prr_out += sent_pkts;
 
         if (likely(sent_pkts)) {
                 tcp_cwnd_validate(sk);
@@ -2294,9 +2292,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                         return;
                 NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
-                if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
-                        tp->prr_out += tcp_skb_pcount(skb);
-
                 if (skb == tcp_write_queue_head(sk))
                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                   inet_csk(sk)->icsk_rto,
