// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * RMNET Data virtual network driver
 */

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"

/* RX/TX Fixup */

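/* Per-CPU RX accounting, called on the ingress path once a MAP frame has
 * been demuxed to this rmnet device. The u64_stats seqcount lets readers
 * fetch tear-free 64-bit counters even on 32-bit hosts.
 */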
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.rx_pkts++;
	pcpu_ptr->stats.rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

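/* Egress counterpart of rmnet_vnd_rx_fixup(): bumps the per-CPU TX packet
 * and byte counters under the same u64_stats protection.
 */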
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.tx_pkts++;
	pcpu_ptr->stats.tx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

/* Network Device Operations */

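/* ndo_start_xmit: if the device is attached to a real device, hand the
 * skb to the MAP egress handler; otherwise count a TX drop and free the
 * skb. NETDEV_TX_OK is returned in both cases because the skb has been
 * consumed either way.
 */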
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->real_dev) {
		rmnet_egress_handler(skb);
	} else {
		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}

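/* Headroom the MAP encapsulation adds on egress: the MAP header always,
 * plus the uplink checksum header when MAPv4 checksum offload is enabled
 * on the port. For example, with RMNET_FLAGS_EGRESS_MAP_CKSUMV4 set and
 * assuming the current 4-byte definitions of both structs, this works
 * out to 4 + 4 = 8 bytes, so a real device MTU of 1500 leaves room for
 * an rmnet MTU of at most 1492.
 */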
static int rmnet_vnd_headroom(struct rmnet_port *port)
{
	u32 headroom;

	headroom = sizeof(struct rmnet_map_header);

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
		headroom += sizeof(struct rmnet_map_ul_csum_header);

	return headroom;
}

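/* ndo_change_mtu: reject an MTU that is negative, larger than
 * RMNET_MAX_PACKET_SIZE, or too big to fit inside the real device's MTU
 * once MAP headroom is added. Runs under RTNL, so rmnet_get_port_rtnl()
 * is safe here.
 */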
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct rmnet_port *port;
	u32 headroom;

	port = rmnet_get_port_rtnl(priv->real_dev);

	headroom = rmnet_vnd_headroom(port);

	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE ||
	    new_mtu > (priv->real_dev->mtu - headroom))
		return -EINVAL;

	rmnet_dev->mtu = new_mtu;
	return 0;
}

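/* Report the underlying real device's ifindex so tools such as
 * "ip link" can show which device this rmnet instance rides on.
 */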
static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	return priv->real_dev->ifindex;
}

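/* ndo_init: allocate the per-CPU stats block and set up GRO cells for
 * the ingress path, unwinding the stats allocation if gro_cells_init()
 * fails.
 */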
static int rmnet_vnd_init(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	int err;

	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
	if (!priv->pcpu_stats)
		return -ENOMEM;

	err = gro_cells_init(&priv->gro_cells, dev);
	if (err) {
		free_percpu(priv->pcpu_stats);
		return err;
	}

	return 0;
}

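/* ndo_uninit: tear down in the reverse order of rmnet_vnd_init(). */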
static void rmnet_vnd_uninit(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
	free_percpu(priv->pcpu_stats);
}

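/* ndo_get_stats64: fold the per-CPU counters into one rtnl_link_stats64.
 * The u64_stats fetch/retry loop rereads a CPU's block if a writer was
 * mid-update, so each snapshot is internally consistent.
 */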
static void rmnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_vnd_stats total_stats = { };
	struct rmnet_pcpu_stats *pcpu_ptr;
	struct rmnet_vnd_stats snapshot;
	unsigned int cpu, start;

	for_each_possible_cpu(cpu) {
		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

		do {
			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
			snapshot = pcpu_ptr->stats;	/* struct assignment */
		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

		total_stats.rx_pkts += snapshot.rx_pkts;
		total_stats.rx_bytes += snapshot.rx_bytes;
		total_stats.tx_pkts += snapshot.tx_pkts;
		total_stats.tx_bytes += snapshot.tx_bytes;
		total_stats.tx_drops += snapshot.tx_drops;
	}

	s->rx_packets = total_stats.rx_pkts;
	s->rx_bytes = total_stats.rx_bytes;
	s->tx_packets = total_stats.tx_pkts;
	s->tx_bytes = total_stats.tx_bytes;
	s->tx_dropped = total_stats.tx_drops;
}

static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_get_iflink = rmnet_vnd_get_iflink,
	.ndo_add_slave = rmnet_add_bridge,
	.ndo_del_slave = rmnet_del_bridge,
	.ndo_init = rmnet_vnd_init,
	.ndo_uninit = rmnet_vnd_uninit,
	.ndo_get_stats64 = rmnet_get_stats64,
};

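/* Ethtool stat names for the checksum-offload counters. The order must
 * match the u64 members of struct rmnet_priv_stats one-to-one, since
 * rmnet_get_ethtool_stats() exports them with a single memcpy().
 */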
static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
	"Checksum ok",
	"Bad IPv4 header checksum",
	"Checksum valid bit not set",
	"Checksum validation failed",
	"Checksum error bad buffer",
	"Checksum error bad ip version",
	"Checksum error bad transport",
	"Checksum skipped on ip fragment",
	"Checksum skipped",
	"Checksum computed in software",
	"Checksum computed in hardware",
};

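/* ethtool_ops->get_strings: copy out the stat names for ETH_SS_STATS. */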
static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &rmnet_gstrings_stats,
		       sizeof(rmnet_gstrings_stats));
		break;
	}
}

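/* ethtool_ops->get_sset_count: only the stats string set is supported. */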
static int rmnet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(rmnet_gstrings_stats);
	default:
		return -EOPNOTSUPP;
	}
}

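/* ethtool_ops->get_ethtool_stats: the private stats struct is laid out
 * as a flat array of u64s, so one memcpy() fills the output buffer.
 */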
static void rmnet_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_priv_stats *st = &priv->stats;

	if (!data)
		return;

	memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
}

static const struct ethtool_ops rmnet_ethtool_ops = {
	.get_ethtool_stats = rmnet_get_ethtool_stats,
	.get_strings = rmnet_get_strings,
	.get_sset_count = rmnet_get_sset_count,
};

/* Called by the kernel whenever a new rmnet<n> device is created. Sets
 * MTU, flags, ARP type, needed headroom, and so on.
 */
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
	eth_hw_addr_random(rmnet_dev);
	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;

	/* Raw IP mode */
	rmnet_dev->header_ops = NULL;	/* No header */
	rmnet_dev->type = ARPHRD_RAWIP;
	rmnet_dev->hard_header_len = 0;
	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

	rmnet_dev->needs_free_netdev = true;
	rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;

	rmnet_dev->features |= NETIF_F_LLTX;

	/* This perm addr will be used as interface identifier by IPv6 */
	rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(rmnet_dev->perm_addr);
}

/* Exposed API */
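
/* The functions below are invoked from the rtnl_link_ops handlers in
 * rmnet_config.c, typically in response to netlink requests such as
 * (assuming an underlying modem netdev named wwan0):
 *
 *   ip link add link wwan0 name rmnet0 type rmnet mux_id 1
 */

/* Create an rmnet device for MUX id @id on top of @real_dev: fails with
 * -EBUSY if the id is already in use, validates that the MTU fits once
 * MAP headroom is added, then registers the netdev and wires up the
 * endpoint.
 */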
247
rmnet_vnd_newlink(u8 id,struct net_device * rmnet_dev,struct rmnet_port * port,struct net_device * real_dev,struct rmnet_endpoint * ep,struct netlink_ext_ack * extack)248 int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
249 struct rmnet_port *port,
250 struct net_device *real_dev,
251 struct rmnet_endpoint *ep,
252 struct netlink_ext_ack *extack)
253
254 {
255 struct rmnet_priv *priv = netdev_priv(rmnet_dev);
256 u32 headroom;
257 int rc;
258
259 if (rmnet_get_endpoint(port, id)) {
260 NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
261 return -EBUSY;
262 }
263
264 rmnet_dev->hw_features = NETIF_F_RXCSUM;
265 rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
266 rmnet_dev->hw_features |= NETIF_F_SG;
267
268 priv->real_dev = real_dev;
269
270 headroom = rmnet_vnd_headroom(port);
271
272 if (rmnet_vnd_change_mtu(rmnet_dev, real_dev->mtu - headroom)) {
273 NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev");
274 return -EINVAL;
275 }
276
277 rc = register_netdevice(rmnet_dev);
278 if (!rc) {
279 ep->egress_dev = rmnet_dev;
280 ep->mux_id = id;
281 port->nr_rmnet_devs++;
282
283 rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
284
285 priv->mux_id = id;
286
287 netdev_dbg(rmnet_dev, "rmnet dev created\n");
288 }
289
290 return rc;
291 }
292
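/* Detach the endpoint for MUX id @id from @port. The rmnet netdev itself
 * is unregistered by the caller (see the dellink handling in
 * rmnet_config.c).
 */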
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep)
{
	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
		return -EINVAL;

	ep->egress_dev = NULL;
	port->nr_rmnet_devs--;
	return 0;
}

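/* Pause or resume the device's TX queue in response to MAP flow-control
 * commands from the modem.
 */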
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
	/* Although we expect a similar number of enable and disable
	 * commands, optimize for disable: it is the more latency-sensitive
	 * of the two.
	 */
	if (unlikely(enable))
		netif_wake_queue(rmnet_dev);
	else
		netif_stop_queue(rmnet_dev);

	return 0;
}

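/* Check that every rmnet device muxed over @real_dev would still fit
 * inside @real_dev's MTU after MAP headroom; used when the real device's
 * MTU is about to change. Returns 0 if all endpoints fit, -1 otherwise.
 */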
int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev)
{
	struct hlist_node *tmp_ep;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	unsigned long bkt_ep;
	u32 headroom;

	port = rmnet_get_port_rtnl(real_dev);

	headroom = rmnet_vnd_headroom(port);

	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
		if (ep->egress_dev->mtu > (real_dev->mtu - headroom))
			return -1;
	}

	return 0;
}

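/* Shrink the MTU of any muxed rmnet device that no longer fits inside
 * @real_dev's MTU minus MAP headroom, e.g. after the egress data format
 * of the port has changed.
 */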
int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
			     struct net_device *real_dev)
{
	struct hlist_node *tmp_ep;
	struct rmnet_endpoint *ep;
	unsigned long bkt_ep;
	u32 headroom;

	headroom = rmnet_vnd_headroom(port);

	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
		if (ep->egress_dev->mtu <= (real_dev->mtu - headroom))
			continue;

		if (rmnet_vnd_change_mtu(ep->egress_dev,
					 real_dev->mtu - headroom))
			return -1;
	}

	return 0;
}