// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

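/* Ordered workqueue on which deferred FDB events are processed in process
 * context.
 */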
static struct workqueue_struct *sparx5_owq;

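/* Deferred work item carrying one FDB add/del event from the atomic
 * switchdev notifier to the ordered workqueue.
 */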
struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

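/* Offload bridge port flags; only BR_MCAST_FLOOD is supported. flags.mask
 * selects the flags to update and flags.val carries their new state.
 */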
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_MCAST_FLOOD)
		sparx5_pgid_update_mask(port, PGID_MC_FLOOD,
					!!(flags.val & BR_MCAST_FLOOD));
}

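/* Map an STP state onto the per-port forwarding and learning masks:
 * forwarding implies learning, and all other states block both.
 */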
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
				      u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* Apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}

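/* Set the MAC table ageing time, converting from clock_t to milliseconds */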
static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}

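/* Dispatch switchdev port attribute changes to the handlers above */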
static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

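/* Attach a port to the offloaded bridge. Only a single hardware bridge is
 * supported; joining a second bridge device is rejected.
 */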
static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge,
				   struct netlink_ext_ack *extack)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct net_device *ndev = port->ndev;
	int err;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	else if (sparx5->hw_bridge_dev != bridge)
		/* This is adding the port to a second bridge, which is
		 * unsupported
		 */
		return -ENODEV;

	set_bit(port->portno, sparx5->bridge_mask);

	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	/* The port enters bridge mode, therefore multicast frames no longer
	 * need to be copied to the CPU unless the bridge requests them
	 */
	__dev_mc_unsync(ndev, sparx5_mc_unsync);

	return 0;

err_switchdev_offload:
	clear_bit(port->portno, sparx5->bridge_mask);
	return err;
}

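/* Detach a port from the offloaded bridge and restore standalone defaults:
 * VLAN unaware, default VIDs and a re-synced host multicast list.
 */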
static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear bridge vlan settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* The port returns to host mode, therefore restore the mc list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}

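/* Handle NETDEV_CHANGEUPPER: join or leave the bridge, then reapply the
 * port VLAN configuration.
 */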
static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev,
						      extack);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}

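/* Learn or forget the port MAC address on the CPU port group for the
 * current port VLAN, depending on whether the interface comes up or down.
 */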
static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}

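/* Handle netdevice events for driver-owned ports: bridge topology changes
 * and port address learning on PRE_UP/DOWN.
 */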
static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}

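/* Netdevice notifier entry point */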
static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

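/* Deferred FDB worker: applies user-added FDB entries to the MAC table
 * under rtnl_lock, then frees the work item and drops the device reference.
 */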
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;

	rtnl_lock();
	if (!sparx5_netdevice_check(dev))
		goto out;

	port = netdev_priv(dev);
	sparx5 = port->sparx5;

	fdb_info = &switchdev_work->fdb_info;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (!fdb_info->added_by_user)
			break;
		sparx5_add_mact_entry(sparx5, port, fdb_info->addr,
				      fdb_info->vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (!fdb_info->added_by_user)
			break;
		sparx5_del_mact_entry(sparx5, fdb_info->addr, fdb_info->vid);
		break;
	}

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}

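/* Atomic switchdev notifier: attribute changes are handled inline, while
 * FDB add/del events are copied and deferred to the ordered workqueue.
 */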
static int sparx5_switchdev_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

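/* Keep a bridged port's own MAC address in sync with its VLAN membership:
 * learn it towards the CPU when added, and forget it when the port no
 * longer participates in the VLAN.
 */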
static void sparx5_sync_port_dev_addr(struct sparx5 *sparx5,
				      struct sparx5_port *port,
				      u16 vid, bool add)
{
	if (!port ||
	    !test_bit(port->portno, sparx5->bridge_mask))
		return; /* Skip null/host interfaces */

	/* Bridge connects to vid? */
	if (add) {
		/* Add the port MAC address in this VLAN */
		sparx5_mact_learn(sparx5, PGID_CPU,
				  port->ndev->dev_addr, vid);
	} else {
		/* Control port addr visibility depending on
		 * port VLAN connectivity.
		 */
		if (test_bit(port->portno, sparx5->vlan_mask[vid]))
			sparx5_mact_learn(sparx5, PGID_CPU,
					  port->ndev->dev_addr, vid);
		else
			sparx5_mact_forget(sparx5,
					   port->ndev->dev_addr, vid);
	}
}

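/* Add or remove the bridge device's own unicast and broadcast addresses in
 * a VLAN, then update each bridged port's address visibility.
 */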
static void sparx5_sync_bridge_dev_addr(struct net_device *dev,
					struct sparx5 *sparx5,
					u16 vid, bool add)
{
	int i;

	/* First, handle the bridge addresses */
	if (add) {
		sparx5_mact_learn(sparx5, PGID_CPU, dev->dev_addr,
				  vid);
		sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
				  vid);
	} else {
		sparx5_mact_forget(sparx5, dev->dev_addr, vid);
		sparx5_mact_forget(sparx5, dev->broadcast, vid);
	}

	/* Now look at the bridged ports */
	for (i = 0; i < SPX5_PORTS; i++)
		sparx5_sync_port_dev_addr(sparx5, sparx5->ports[i], vid, add);
}

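/* Handle a port VLAN add, triggered by e.g.
 * "bridge vlan add dev <port> vid 100 pvid untagged": for the bridge device
 * itself only bridge VLAN entries matter; for switch ports the VID is
 * programmed together with its PVID and untagged flags.
 */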
static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port;

	if (netif_is_bridge_master(dev)) {
		if (v->flags & BRIDGE_VLAN_INFO_BRENTRY) {
			struct sparx5 *sparx5 =
				container_of(nb, struct sparx5,
					     switchdev_blocking_nb);

			sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
		}
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	port = netdev_priv(dev);
	return sparx5_vlan_vid_add(port, v->vid,
				   v->flags & BRIDGE_VLAN_INFO_PVID,
				   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

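/* Dispatch blocking switchdev object additions; only port VLANs are
 * offloaded.
 */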
static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

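/* Handle a port VLAN delete: drop the bridge addresses for the VID on the
 * bridge device, or remove the VID and the matching port MAC entry on a
 * switch port.
 */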
static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port;
	int ret;

	/* Master bridge? */
	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_sync_bridge_dev_addr(dev, sparx5, vid, false);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	port = netdev_priv(dev);
	ret = sparx5_vlan_vid_del(port, vid);
	if (ret)
		return ret;

	/* Delete the port MAC address with the matching VLAN information */
	sparx5_mact_forget(port->sparx5, port->ndev->dev_addr, vid);

	return 0;
}

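/* Dispatch blocking switchdev object deletions; only port VLANs are
 * offloaded.
 */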
static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

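/* Blocking switchdev notifier: object add/del and attribute set requests */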
static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

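/* Register the netdevice and switchdev notifiers and create the ordered
 * workqueue; unwinds all prior registrations on failure.
 */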
int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		err = -ENOMEM;
		goto err_owq;
	}

	return 0;

err_owq:
	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}

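/* Tear down the ordered workqueue and unregister all notifiers */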
void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}