// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

/* WARNING: This implementation is not necessarily the same
 * as tcp_dctcp.c.  The purpose is mainly for testing
 * the kernel BPF logic.
 */

#include <stddef.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_tcp_helpers.h"

char _license[] SEC("license") = "GPL";

volatile const char fallback[TCP_CA_NAME_MAX];
const char bpf_dctcp[] = "bpf_dctcp";
const char tcp_cdg[] = "cdg";
char cc_res[TCP_CA_NAME_MAX];
int tcp_cdg_res = 0;
int stg_result = 0;
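
/* The userspace side of the selftest is expected to fill in fallback before
 * the program is loaded (e.g. with another CC name such as "cubic") and to
 * read cc_res, tcp_cdg_res and stg_result back from the global data section.
 * This description of the userspace contract is an assumption based on how
 * the variables are used below.
 */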

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, int);
} sk_stg_map SEC(".maps");
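
/* sk_stg_map is per-socket storage keyed by the socket itself.  dctcp_init()
 * below reads a value from it into stg_result and then deletes the entry;
 * the value is presumably installed by the userspace test beforehand.
 */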

#define DCTCP_MAX_ALPHA 1024U

struct dctcp {
	__u32 old_delivered;
	__u32 old_delivered_ce;
	__u32 prior_rcv_nxt;
	__u32 dctcp_alpha;
	__u32 next_seq;
	__u32 ce_state;
	__u32 loss_cwnd;
};

static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */
static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA;
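
/* dctcp_alpha is a fixed-point fraction scaled by DCTCP_MAX_ALPHA (1024):
 * 1024 means every packet in the last window was CE marked.  dctcp_shift_g
 * encodes the EWMA gain g as 1/2^dctcp_shift_g.
 */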

static __always_inline void dctcp_reset(const struct tcp_sock *tp,
					struct dctcp *ca)
{
	ca->next_seq = tp->snd_nxt;

	ca->old_delivered = tp->delivered;
	ca->old_delivered_ce = tp->delivered_ce;
}

SEC("struct_ops/dctcp_init")
void BPF_PROG(dctcp_init, struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct dctcp *ca = inet_csk_ca(sk);
	int *stg;

	if (!(tp->ecn_flags & TCP_ECN_OK) && fallback[0]) {
		/* Switch to fallback */
		bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
			       (void *)fallback, sizeof(fallback));
		/* Switch back to myself; the bpf trampoline stops
		 * dctcp_init from being called recursively.
		 */
		bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
			       (void *)bpf_dctcp, sizeof(bpf_dctcp));
		/* Switch back to fallback */
		bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
			       (void *)fallback, sizeof(fallback));
		/* Expecting -ENOTSUPP for tcp_cdg_res */
		tcp_cdg_res = bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
					     (void *)tcp_cdg, sizeof(tcp_cdg));
		bpf_getsockopt(sk, SOL_TCP, TCP_CONGESTION,
			       (void *)cc_res, sizeof(cc_res));
		return;
	}

	ca->prior_rcv_nxt = tp->rcv_nxt;
	ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
	ca->loss_cwnd = 0;
	ca->ce_state = 0;

	stg = bpf_sk_storage_get(&sk_stg_map, (void *)tp, NULL, 0);
	if (stg) {
		stg_result = *stg;
		bpf_sk_storage_delete(&sk_stg_map, (void *)tp);
	}
	dctcp_reset(tp, ca);
}

SEC("struct_ops/dctcp_ssthresh")
__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
{
	struct dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->loss_cwnd = tp->snd_cwnd;
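	/* cwnd = cwnd * (1 - alpha / 2): alpha is scaled by 1024, so the
	 * combined divide by 1024 and by 2 is the single shift by 11.
	 */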
	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}

SEC("struct_ops/dctcp_update_alpha")
void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct dctcp *ca = inet_csk_ca(sk);

	/* Expired RTT */
	if (!before(tp->snd_una, ca->next_seq)) {
		__u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
		__u32 alpha = ca->dctcp_alpha;

		/* alpha = (1 - g) * alpha + g * F */

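		/* Fixed point: alpha is scaled by DCTCP_MAX_ALPHA (1024) and
		 * g = 1/2^dctcp_shift_g, so (1 - g) * alpha becomes
		 * alpha - (alpha >> dctcp_shift_g), and g * F is computed
		 * below by scaling delivered_ce into the same 0..1024 range.
		 */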
		alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
		if (delivered_ce) {
			__u32 delivered = tp->delivered - ca->old_delivered;

			/* If dctcp_shift_g == 1, a 32-bit value would overflow
			 * after 8 M packets.
			 */
			delivered_ce <<= (10 - dctcp_shift_g);
			delivered_ce /= max(1U, delivered);

			alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
		}
		ca->dctcp_alpha = alpha;
		dctcp_reset(tp, ca);
	}
}

static __always_inline void dctcp_react_to_loss(struct sock *sk)
{
	struct dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->loss_cwnd = tp->snd_cwnd;
	tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
}

SEC("struct_ops/dctcp_state")
void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
{
	if (new_state == TCP_CA_Recovery &&
	    new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))
		dctcp_react_to_loss(sk);
	/* We handle RTO in dctcp_cwnd_event to ensure that we perform only
	 * one loss-adjustment per RTT.
	 */
}

static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (ce_state == 1)
		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
	else
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

/* Minimal DCTCP CE state machine:
 *
 * S:	0 <- last pkt was non-CE
 *	1 <- last pkt was CE
 */
static __always_inline
void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
			  __u32 *prior_rcv_nxt, __u32 *ce_state)
{
	__u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;

	if (*ce_state != new_ce_state) {
		/* CE state has changed, force an immediate ACK to
		 * reflect the new CE state. If an ACK was delayed,
		 * send that first to reflect the prior CE state.
		 */
		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
			dctcp_ece_ack_cwr(sk, *ce_state);
			bpf_tcp_send_ack(sk, *prior_rcv_nxt);
		}
		inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
	}
	*prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
	*ce_state = new_ce_state;
	dctcp_ece_ack_cwr(sk, new_ce_state);
}

SEC("struct_ops/dctcp_cwnd_event")
void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
{
	struct dctcp *ca = inet_csk_ca(sk);

	switch (ev) {
	case CA_EVENT_ECN_IS_CE:
	case CA_EVENT_ECN_NO_CE:
		dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
		break;
	case CA_EVENT_LOSS:
		dctcp_react_to_loss(sk);
		break;
	default:
		/* Don't care for the rest. */
		break;
	}
}

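/* Undo the cwnd reduction if the loss turned out to be spurious: return the
 * larger of the current cwnd and the cwnd saved at the last loss event.
 */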
SEC("struct_ops/dctcp_cwnd_undo")
__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
{
	const struct dctcp *ca = inet_csk_ca(sk);

	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}

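/* tcp_reno_cong_avoid() is the kernel's own Reno cong_avoid; declaring it
 * with __ksym lets the verifier resolve it so the program can call it
 * directly as a kernel function (kfunc).
 */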
extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;

SEC("struct_ops/dctcp_reno_cong_avoid")
void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
	tcp_reno_cong_avoid(sk, ack, acked);
}

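/* A second, minimal instance that only wires up .init and .set_state.  The
 * name suggests the selftest registers it but never selects it for traffic;
 * that intent is inferred from the name rather than stated in this file.
 */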
SEC(".struct_ops")
struct tcp_congestion_ops dctcp_nouse = {
	.init = (void *)dctcp_init,
	.set_state = (void *)dctcp_state,
	.flags = TCP_CONG_NEEDS_ECN,
	.name = "bpf_dctcp_nouse",
};

SEC(".struct_ops")
struct tcp_congestion_ops dctcp = {
	.init = (void *)dctcp_init,
	.in_ack_event = (void *)dctcp_update_alpha,
	.cwnd_event = (void *)dctcp_cwnd_event,
	.ssthresh = (void *)dctcp_ssthresh,
	.cong_avoid = (void *)dctcp_cong_avoid,
	.undo_cwnd = (void *)dctcp_cwnd_undo,
	.set_state = (void *)dctcp_state,
	.flags = TCP_CONG_NEEDS_ECN,
	.name = "bpf_dctcp",
};
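
/* Once this struct_ops map is attached, a socket can opt in to the BPF CC
 * with, for example:
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bpf_dctcp",
 *		   sizeof("bpf_dctcp"));
 *
 * This usage sketch is illustrative; the accompanying selftest drives the
 * equivalent steps from userspace.
 */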