// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa.c - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 */

#include <linux/device.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/etherdevice.h>

#include "dsa_priv.h"

static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);

static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	/* Just return the original SKB */
	return skb;
}

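/* The "none" tagging protocol: frames are passed to the CPU port unmodified
 * on transmit and cannot be demultiplexed on receive (.rcv is NULL), so it
 * only serves as a fallback when no real tagger is available.
 */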
static const struct dsa_device_ops none_ops = {
	.name = "none",
	.proto = DSA_TAG_PROTO_NONE,
	.xmit = dsa_slave_notag_xmit,
	.rcv = NULL,
};

DSA_TAG_DRIVER(none_ops);

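/* Register a single tagging protocol driver on the global list, recording the
 * module that owns it so users can later pin it with try_module_get().
 */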
static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
				    struct module *owner)
{
	dsa_tag_driver->owner = owner;

	mutex_lock(&dsa_tag_drivers_lock);
	list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

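/* Register an array of tagging protocol drivers on behalf of @owner,
 * typically from a tag driver module's init path.
 */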
void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
			      unsigned int count, struct module *owner)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
}

static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
{
	mutex_lock(&dsa_tag_drivers_lock);
	list_del(&dsa_tag_driver->list);
	mutex_unlock(&dsa_tag_drivers_lock);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);

void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);

const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
	return ops->name;
}

/* Function takes a reference on the module owning the tagger,
 * so dsa_tag_driver_put must be called afterwards.
 */
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf)
{
	const struct dsa_device_ops *ops = ERR_PTR(-ENOPROTOOPT);
	struct dsa_tag_driver *dsa_tag_driver;

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		const struct dsa_device_ops *tmp = dsa_tag_driver->ops;

		if (!sysfs_streq(buf, tmp->name))
			continue;

		if (!try_module_get(dsa_tag_driver->owner))
			break;

		ops = tmp;
		break;
	}
	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

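/* Look up the tagging protocol ops for @tag_protocol, loading the tag driver
 * module via its alias if needed and taking a reference on the owning module.
 * Returns an ERR_PTR(-ENOPROTOOPT) if no matching tagger can be pinned; the
 * reference must be dropped with dsa_tag_driver_put().
 */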
const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol)
{
	struct dsa_tag_driver *dsa_tag_driver;
	const struct dsa_device_ops *ops;
	bool found = false;

	request_module("%s%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		ops = dsa_tag_driver->ops;
		if (ops->proto == tag_protocol) {
			found = true;
			break;
		}
	}

	if (found) {
		if (!try_module_get(dsa_tag_driver->owner))
			ops = ERR_PTR(-ENOPROTOOPT);
	} else {
		ops = ERR_PTR(-ENOPROTOOPT);
	}

	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

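/* Drop the module reference taken by dsa_tag_driver_get() or
 * dsa_find_tagger_by_name().
 */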
void dsa_tag_driver_put(const struct dsa_device_ops *ops)
{
	struct dsa_tag_driver *dsa_tag_driver;

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		if (dsa_tag_driver->ops == ops) {
			module_put(dsa_tag_driver->owner);
			break;
		}
	}
	mutex_unlock(&dsa_tag_drivers_lock);
}

static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

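/* Resolve a generic struct device (or one of its children) to the net_device
 * it represents. A reference is taken on the returned net_device with
 * dev_hold(), which the caller is responsible for releasing.
 */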
struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_dev_to_net_device);

/* Determine if we should defer delivery of skb until we have a rx timestamp.
 *
 * Called from dsa_switch_rcv. For now, this will only work if tagging is
 * enabled on the switch. Normally the MAC driver would retrieve the hardware
 * timestamp when it reads the packet out of the hardware. However in a DSA
 * switch, the DSA driver owning the interface to which the packet is
 * delivered is never notified unless we do so here.
 */
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
				       struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	unsigned int type;

	if (skb_headroom(skb) < ETH_HLEN)
		return false;

	__skb_push(skb, ETH_HLEN);

	type = ptp_classify_raw(skb);

	__skb_pull(skb, ETH_HLEN);

	if (type == PTP_CLASS_NONE)
		return false;

	if (likely(ds->ops->port_rxtstamp))
		return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);

	return false;
}

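/* Receive handler for ETH_P_XDSA frames coming in on a DSA master (CPU port).
 * The CPU port's tagger strips the switch tag and redirects the skb to the
 * correct slave net_device before it is handed up the stack.
 */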
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *unused)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct sk_buff *nskb = NULL;
	struct dsa_slave_priv *p;

	if (unlikely(!cpu_dp)) {
		kfree_skb(skb);
		return 0;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;

	nskb = cpu_dp->rcv(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return 0;
	}

	skb = nskb;
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (unlikely(!dsa_slave_dev_check(skb->dev))) {
		/* Packet is to be injected directly on an upper
		 * device, e.g. a team/bond, so skip all DSA-port
		 * specific actions.
		 */
		netif_rx(skb);
		return 0;
	}

	p = netdev_priv(skb->dev);

	if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
		nskb = dsa_untag_bridge_pvid(skb);
		if (!nskb) {
			kfree_skb(skb);
			return 0;
		}
		skb = nskb;
	}

	dev_sw_netstats_rx_add(skb->dev, skb->len);

	if (dsa_skb_defer_rx_timestamp(p, skb))
		return 0;

	gro_cells_receive(&p->gcells, skb);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}

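/* Suspend the slave (user port) network devices first, then give the switch
 * driver a chance to quiesce the hardware via its ->suspend() op.
 */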
int dsa_switch_suspend(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	/* Suspend slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_suspend(dp->slave);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_resume(dp->slave);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

static struct packet_type dsa_pack_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_XDSA),
	.func = dsa_switch_rcv,
};

static struct workqueue_struct *dsa_owq;

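/* Queue deferred work on DSA's ordered ("dsa_ordered") workqueue. Work items
 * run one at a time, in queueing order; dsa_flush_workqueue() waits for all
 * pending items to complete.
 */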
bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

void dsa_flush_workqueue(void)
{
	flush_workqueue(dsa_owq);
}

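/* Thin wrappers around the devlink API, letting switch drivers register
 * params, resources and regions against the devlink instance that DSA core
 * set up for them (ds->devlink).
 */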
int dsa_devlink_param_get(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_param_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_param_get(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_get);

int dsa_devlink_param_set(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_param_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_param_set(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_set);

int dsa_devlink_params_register(struct dsa_switch *ds,
				const struct devlink_param *params,
				size_t params_count)
{
	return devlink_params_register(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_register);

void dsa_devlink_params_unregister(struct dsa_switch *ds,
				   const struct devlink_param *params,
				   size_t params_count)
{
	devlink_params_unregister(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_unregister);

int dsa_devlink_resource_register(struct dsa_switch *ds,
				  const char *resource_name,
				  u64 resource_size,
				  u64 resource_id,
				  u64 parent_resource_id,
				  const struct devlink_resource_size_params *size_params)
{
	return devlink_resource_register(ds->devlink, resource_name,
					 resource_size, resource_id,
					 parent_resource_id,
					 size_params);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);

void dsa_devlink_resources_unregister(struct dsa_switch *ds)
{
	devlink_resources_unregister(ds->devlink, NULL);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister);

void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
					   u64 resource_id,
					   devlink_resource_occ_get_t *occ_get,
					   void *occ_get_priv)
{
	return devlink_resource_occ_get_register(ds->devlink, resource_id,
						 occ_get, occ_get_priv);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);

void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
					     u64 resource_id)
{
	devlink_resource_occ_get_unregister(ds->devlink, resource_id);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);

struct devlink_region *
dsa_devlink_region_create(struct dsa_switch *ds,
			  const struct devlink_region_ops *ops,
			  u32 region_max_snapshots, u64 region_size)
{
	return devlink_region_create(ds->devlink, ops, region_max_snapshots,
				     region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_create);

struct devlink_region *
dsa_devlink_port_region_create(struct dsa_switch *ds,
			       int port,
			       const struct devlink_port_region_ops *ops,
			       u32 region_max_snapshots, u64 region_size)
{
	struct dsa_port *dp = dsa_to_port(ds, port);

	return devlink_port_region_create(&dp->devlink_port, ops,
					  region_max_snapshots,
					  region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_port_region_create);

void dsa_devlink_region_destroy(struct devlink_region *region)
{
	devlink_region_destroy(region);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_destroy);

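/* Map a net_device back to its dsa_port, or return -ENODEV if the netdevice
 * was not created by DSA.
 */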
struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
	if (!netdev || !dsa_slave_dev_check(netdev))
		return ERR_PTR(-ENODEV);

	return dsa_slave_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);

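/* Module init: allocate the ordered workqueue, register the slave netdevice
 * notifier, hook up the ETH_P_XDSA packet handler and register the "none"
 * tagger as a built-in fallback.
 */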
static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_slave_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	dsa_tag_driver_register(&DSA_TAG_DRIVER_NAME(none_ops),
				THIS_MODULE);

	return 0;

register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	dsa_tag_driver_unregister(&DSA_TAG_DRIVER_NAME(none_ops));

	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");