// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ifb.c:

	The purpose of this driver is to provide a device that allows
	for sharing of resources:

	1) qdiscs/policies that are per device as opposed to system wide.
	ifb allows for a device which can be redirected to, thus providing
	an impression of sharing.

	2) Allows for queueing incoming traffic for shaping instead of
	dropping it.

	The original concept is based on what is known as the IMQ
	driver, initially written by Martin Devera, later rewritten
	by Patrick McHardy and then maintained by Andre Correa.

	You need the tc action mirror or redirect to feed this device
	packets; an illustrative setup is sketched below.

	Authors:	Jamal Hadi Salim (2005)

*/
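/* Illustrative setup (not part of this driver; the device names are
 * examples): redirect ingress traffic from eth0 into ifb0 so it can be
 * shaped by a qdisc attached to ifb0:
 *
 *	tc qdisc add dev eth0 handle ffff: ingress
 *	tc filter add dev eth0 parent ffff: matchall \
 *		action mirred egress redirect dev ifb0
 *	tc qdisc add dev ifb0 root sfq
 */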

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/netfilter_netdev.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>

#define TX_Q_LIMIT 32
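/* Per tx-queue private state. Packets handed to ifb_xmit() are queued
 * on rq; the per-queue tasklet splices rq into tq and re-injects each
 * packet into the stack. The rx_* counters cover packets accepted from
 * the redirecting device, the tx_* counters packets re-injected.
 */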
struct ifb_q_private {
	struct net_device *dev;
	struct tasklet_struct ifb_tasklet;
	int tasklet_pending;
	int txqnum;
	struct sk_buff_head rq;
	u64 rx_packets;
	u64 rx_bytes;
	struct u64_stats_sync rsync;

	struct u64_stats_sync tsync;
	u64 tx_packets;
	u64 tx_bytes;
	struct sk_buff_head tq;
} ____cacheline_aligned_in_smp;

struct ifb_dev_private {
	struct ifb_q_private *tx_private;
};

static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);

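/* Per-queue tasklet: splice newly queued packets from rq into tq under
 * the tx queue lock, then re-inject each one on its original device,
 * via dev_queue_xmit() for packets redirected on egress or
 * netif_receive_skb() for packets that arrived on ingress.
 */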
static void ifb_ri_tasklet(struct tasklet_struct *t)
{
	struct ifb_q_private *txp = from_tasklet(txp, t, ifb_tasklet);
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}

	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		/* Skip tc and netfilter to prevent redirection loop. */
		skb->redirected = 0;
#ifdef CONFIG_NET_CLS_ACT
		skb->tc_skip_classify = 1;
#endif
		nf_skip_egress(skb, true);

		u64_stats_update_begin(&txp->tsync);
		txp->tx_packets++;
		txp->tx_bytes += skb->len;
		u64_stats_update_end(&txp->tsync);

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = txp->dev->ifindex;

		if (!skb->from_ingress) {
			dev_queue_xmit(skb);
		} else {
			skb_pull_rcsum(skb, skb->mac_len);
			netif_receive_skb(skb);
		}
	}

	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}
}

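/* Aggregate the per-queue counters (read under the u64_stats seqcount)
 * into the device-wide stats; the dropped counts come from dev->stats.
 */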
static void ifb_stats64(struct net_device *dev,
			struct rtnl_link_stats64 *stats)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	unsigned int start;
	u64 packets, bytes;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		do {
			start = u64_stats_fetch_begin_irq(&txp->rsync);
			packets = txp->rx_packets;
			bytes = txp->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&txp->rsync, start));
		stats->rx_packets += packets;
		stats->rx_bytes += bytes;

		do {
			start = u64_stats_fetch_begin_irq(&txp->tsync);
			packets = txp->tx_packets;
			bytes = txp->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&txp->tsync, start));
		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}

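/* Allocate and initialize one ifb_q_private per tx queue: packet
 * queues, stats seqcounts and the tasklet that drains the queue.
 */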
static int ifb_dev_init(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp;
	int i;

	txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
	if (!txp)
		return -ENOMEM;
	dp->tx_private = txp;
	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		txp->txqnum = i;
		txp->dev = dev;
		__skb_queue_head_init(&txp->rq);
		__skb_queue_head_init(&txp->tq);
		u64_stats_init(&txp->rsync);
		u64_stats_init(&txp->tsync);
		tasklet_setup(&txp->ifb_tasklet, ifb_ri_tasklet);
		netif_tx_start_queue(netdev_get_tx_queue(dev, i));
	}
	return 0;
}

static const struct net_device_ops ifb_netdev_ops = {
	.ndo_open = ifb_open,
	.ndo_stop = ifb_close,
	.ndo_get_stats64 = ifb_stats64,
	.ndo_start_xmit = ifb_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_init = ifb_dev_init,
};

#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
		      NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
		      NETIF_F_HW_VLAN_STAG_TX)

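/* Kill the per-queue tasklets and drop any packets still queued. */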
static void ifb_dev_free(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		tasklet_kill(&txp->ifb_tasklet);
		__skb_queue_purge(&txp->rq);
		__skb_queue_purge(&txp->tq);
	}
	kfree(dp->tx_private);
}

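/* Configure an ifb netdev: ethernet-like, no ARP, random MAC address,
 * and the feature flags from IFB_FEATURES. A min_mtu/max_mtu of 0
 * leaves the MTU range unrestricted.
 */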
static void ifb_setup(struct net_device *dev)
{
	/* Initialize the device structure. */
	dev->netdev_ops = &ifb_netdev_ops;

	/* Fill in device structure with ethernet-generic values. */
	ether_setup(dev);
	dev->tx_queue_len = TX_Q_LIMIT;

	dev->features |= IFB_FEATURES;
	dev->hw_features |= dev->features;
	dev->hw_enc_features |= dev->features;
	dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
					       NETIF_F_HW_VLAN_STAG_TX);

	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netif_keep_dst(dev);
	eth_hw_addr_random(dev);
	dev->needs_free_netdev = true;
	dev->priv_destructor = ifb_dev_free;

	dev->min_mtu = 0;
	dev->max_mtu = 0;
}

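/* Entry point for packets redirected to this device. Count the packet,
 * drop it unless it really was redirected here, then queue it on rq and
 * kick the per-queue tasklet. The queue is stopped once rq reaches
 * tx_queue_len; the tasklet wakes it again after draining.
 */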
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);

	u64_stats_update_begin(&txp->rsync);
	txp->rx_packets++;
	txp->rx_bytes += skb->len;
	u64_stats_update_end(&txp->rsync);

	if (!skb->redirected || !skb->skb_iif) {
		dev_kfree_skb(skb);
		dev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

	if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));

	__skb_queue_tail(&txp->rq, skb);
	if (!txp->tasklet_pending) {
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

	return NETDEV_TX_OK;
}

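/* Up/down just start and stop the tx queues; the per-queue state
 * persists until the device is freed.
 */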
static int ifb_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int ifb_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);
	return 0;
}

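/* Validate netlink attributes: if a MAC address is supplied it must be
 * a well-formed unicast ethernet address.
 */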
static int ifb_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static struct rtnl_link_ops ifb_link_ops __read_mostly = {
	.kind = "ifb",
	.priv_size = sizeof(struct ifb_dev_private),
	.setup = ifb_setup,
	.validate = ifb_validate,
};

/* Number of ifb devices to be set up by this module.
 * Note that these legacy devices have one queue.
 * Prefer something like: ip link add ifb10 numtxqueues 8 type ifb
 */
static int numifbs = 2;
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");

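/* Create and register one legacy single-queue ifbN device; the caller
 * holds the rtnl lock, as register_netdevice() requires.
 */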
static int __init ifb_init_one(int index)
{
	struct net_device *dev_ifb;
	int err;

	dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
			       NET_NAME_UNKNOWN, ifb_setup);

	if (!dev_ifb)
		return -ENOMEM;

	dev_ifb->rtnl_link_ops = &ifb_link_ops;
	err = register_netdevice(dev_ifb);
	if (err < 0)
		goto err;

	return 0;

err:
	free_netdev(dev_ifb);
	return err;
}

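/* Register the link ops and create the initial numifbs devices in one
 * locked section; the error path uses __rtnl_link_unregister(), which
 * expects both the rtnl lock and pernet_ops_rwsem to be held.
 */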
static int __init ifb_init_module(void)
{
	int i, err;

	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = __rtnl_link_register(&ifb_link_ops);
	if (err < 0)
		goto out;

	for (i = 0; i < numifbs && !err; i++) {
		err = ifb_init_one(i);
		cond_resched();
	}
	if (err)
		__rtnl_link_unregister(&ifb_link_ops);

out:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);

	return err;
}

static void __exit ifb_cleanup_module(void)
{
	rtnl_link_unregister(&ifb_link_ops);
}

module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
MODULE_ALIAS_RTNL_LINK("ifb");