// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
 *
 * This module is not a complete tagger implementation. It only provides
 * primitives for taggers that rely on 802.1Q VLAN tags to use. The
 * dsa_8021q_netdev_ops is registered for API compliance and not used
 * directly by callers.
 */
#include <linux/if_vlan.h>
#include <linux/dsa/8021q.h>

#include "dsa_priv.h"

/* Binary structure of the fake 12-bit VID field (when the TPID is
 * ETH_P_DSA_8021Q):
 *
 * | 11  | 10  |  9  |  8  |  7  |  6  |  5  |  4  |  3  |  2  |  1  |  0  |
 * +-----------+-----+-----------------+-----------+-----------------------+
 * |    DIR    | VBID|    SWITCH_ID    |   VBID    |          PORT         |
 * +-----------+-----+-----------------+-----------+-----------------------+
 *
 * DIR - VID[11:10]:
 *      Direction flags.
 *      * 1 (0b01) for RX VLAN,
 *      * 2 (0b10) for TX VLAN.
 *      These values ensure that the special VIDs 0, 1 and 4095 are left
 *      unused by this coding scheme.
 *
 * SWITCH_ID - VID[8:6]:
 *      Index of switch within DSA tree. Must be between 0 and 7.
 *
 * VBID - { VID[9], VID[5:4] }:
 *      Virtual bridge ID. If between 1 and 7, packet targets the broadcast
 *      domain of a bridge. If transmitted as zero, packet targets a single
 *      port. The field is only valid on transmit and must be ignored on
 *      receive.
 *
 * PORT - VID[3:0]:
 *      Index of switch port. Must be between 0 and 15.
 */

#define DSA_8021Q_DIR_SHIFT             10
#define DSA_8021Q_DIR_MASK              GENMASK(11, 10)
#define DSA_8021Q_DIR(x)                (((x) << DSA_8021Q_DIR_SHIFT) & \
                                         DSA_8021Q_DIR_MASK)
#define DSA_8021Q_DIR_RX                DSA_8021Q_DIR(1)
#define DSA_8021Q_DIR_TX                DSA_8021Q_DIR(2)

#define DSA_8021Q_SWITCH_ID_SHIFT       6
#define DSA_8021Q_SWITCH_ID_MASK        GENMASK(8, 6)
#define DSA_8021Q_SWITCH_ID(x)          (((x) << DSA_8021Q_SWITCH_ID_SHIFT) & \
                                         DSA_8021Q_SWITCH_ID_MASK)

#define DSA_8021Q_VBID_HI_SHIFT         9
#define DSA_8021Q_VBID_HI_MASK          GENMASK(9, 9)
#define DSA_8021Q_VBID_LO_SHIFT         4
#define DSA_8021Q_VBID_LO_MASK          GENMASK(5, 4)
#define DSA_8021Q_VBID_HI(x)            (((x) & GENMASK(2, 2)) >> 2)
#define DSA_8021Q_VBID_LO(x)            ((x) & GENMASK(1, 0))
#define DSA_8021Q_VBID(x)               \
                (((DSA_8021Q_VBID_LO(x) << DSA_8021Q_VBID_LO_SHIFT) & \
                  DSA_8021Q_VBID_LO_MASK) | \
                 ((DSA_8021Q_VBID_HI(x) << DSA_8021Q_VBID_HI_SHIFT) & \
                  DSA_8021Q_VBID_HI_MASK))

#define DSA_8021Q_PORT_SHIFT            0
#define DSA_8021Q_PORT_MASK             GENMASK(3, 0)
#define DSA_8021Q_PORT(x)               (((x) << DSA_8021Q_PORT_SHIFT) & \
                                         DSA_8021Q_PORT_MASK)
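
/* Worked example of the encoding above (purely illustrative numbers): for
 * switch index 1, port index 2, the helpers below produce
 *
 *      RX VID = DSA_8021Q_DIR_RX | DSA_8021Q_SWITCH_ID(1) | DSA_8021Q_PORT(2)
 *             = 0x400 | 0x040 | 0x002 = 0x442 (1090)
 *      TX VID = DSA_8021Q_DIR_TX | DSA_8021Q_SWITCH_ID(1) | DSA_8021Q_PORT(2)
 *             = 0x800 | 0x040 | 0x002 = 0x842 (2114)
 *
 * and dsa_8021q_rx_switch_id() / dsa_8021q_rx_source_port() recover 1 and 2
 * from either value.
 */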

u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num)
{
        /* The VBID value of 0 is reserved for precise TX */
        return DSA_8021Q_DIR_TX | DSA_8021Q_VBID(bridge_num + 1);
}
EXPORT_SYMBOL_GPL(dsa_8021q_bridge_tx_fwd_offload_vid);
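
/* For illustration: bridge_num 0 maps to VBID 1, so the VID returned above is
 * DSA_8021Q_DIR_TX | DSA_8021Q_VBID(1) = 0x800 | 0x010 = 0x810 (2064).
 * SWITCH_ID and PORT remain zero: the frame is steered by bridge membership
 * rather than towards one precise port.
 */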

/* Returns the VID to be inserted into the frame from xmit for switch steering
 * instructions on egress. Encodes switch ID and port ID.
 */
u16 dsa_tag_8021q_tx_vid(const struct dsa_port *dp)
{
        return DSA_8021Q_DIR_TX | DSA_8021Q_SWITCH_ID(dp->ds->index) |
               DSA_8021Q_PORT(dp->index);
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_tx_vid);

/* Returns the VID that will be installed as pvid for this switch port, sent as
 * tagged egress towards the CPU port and decoded by the rcv function.
 */
u16 dsa_tag_8021q_rx_vid(const struct dsa_port *dp)
{
        return DSA_8021Q_DIR_RX | DSA_8021Q_SWITCH_ID(dp->ds->index) |
               DSA_8021Q_PORT(dp->index);
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_rx_vid);

/* Returns the decoded switch ID from the RX VID. */
int dsa_8021q_rx_switch_id(u16 vid)
{
        return (vid & DSA_8021Q_SWITCH_ID_MASK) >> DSA_8021Q_SWITCH_ID_SHIFT;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rx_switch_id);

/* Returns the decoded port ID from the RX VID. */
int dsa_8021q_rx_source_port(u16 vid)
{
        return (vid & DSA_8021Q_PORT_MASK) >> DSA_8021Q_PORT_SHIFT;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rx_source_port);

bool vid_is_dsa_8021q_rxvlan(u16 vid)
{
        return (vid & DSA_8021Q_DIR_MASK) == DSA_8021Q_DIR_RX;
}
EXPORT_SYMBOL_GPL(vid_is_dsa_8021q_rxvlan);

bool vid_is_dsa_8021q_txvlan(u16 vid)
{
        return (vid & DSA_8021Q_DIR_MASK) == DSA_8021Q_DIR_TX;
}
EXPORT_SYMBOL_GPL(vid_is_dsa_8021q_txvlan);

bool vid_is_dsa_8021q(u16 vid)
{
        return vid_is_dsa_8021q_rxvlan(vid) || vid_is_dsa_8021q_txvlan(vid);
}
EXPORT_SYMBOL_GPL(vid_is_dsa_8021q);

static struct dsa_tag_8021q_vlan *
dsa_tag_8021q_vlan_find(struct dsa_8021q_context *ctx, int port, u16 vid)
{
        struct dsa_tag_8021q_vlan *v;

        list_for_each_entry(v, &ctx->vlans, list)
                if (v->vid == vid && v->port == port)
                        return v;

        return NULL;
}

static int dsa_port_do_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid,
                                          u16 flags)
{
        struct dsa_8021q_context *ctx = dp->ds->tag_8021q_ctx;
        struct dsa_switch *ds = dp->ds;
        struct dsa_tag_8021q_vlan *v;
        int port = dp->index;
        int err;

        /* No need to bother with refcounting for user ports */
        if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
                return ds->ops->tag_8021q_vlan_add(ds, port, vid, flags);

        v = dsa_tag_8021q_vlan_find(ctx, port, vid);
        if (v) {
                refcount_inc(&v->refcount);
                return 0;
        }

        v = kzalloc(sizeof(*v), GFP_KERNEL);
        if (!v)
                return -ENOMEM;

        err = ds->ops->tag_8021q_vlan_add(ds, port, vid, flags);
        if (err) {
                kfree(v);
                return err;
        }

        v->vid = vid;
        v->port = port;
        refcount_set(&v->refcount, 1);
        list_add_tail(&v->list, &ctx->vlans);

        return 0;
}

static int dsa_port_do_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid)
{
        struct dsa_8021q_context *ctx = dp->ds->tag_8021q_ctx;
        struct dsa_switch *ds = dp->ds;
        struct dsa_tag_8021q_vlan *v;
        int port = dp->index;
        int err;

        /* No need to bother with refcounting for user ports */
        if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
                return ds->ops->tag_8021q_vlan_del(ds, port, vid);

        v = dsa_tag_8021q_vlan_find(ctx, port, vid);
        if (!v)
                return -ENOENT;

        if (!refcount_dec_and_test(&v->refcount))
                return 0;

        err = ds->ops->tag_8021q_vlan_del(ds, port, vid);
        if (err) {
                refcount_inc(&v->refcount);
                return err;
        }

        list_del(&v->list);
        kfree(v);

        return 0;
}

static bool
dsa_port_tag_8021q_vlan_match(struct dsa_port *dp,
                              struct dsa_notifier_tag_8021q_vlan_info *info)
{
        struct dsa_switch *ds = dp->ds;

        if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
                return true;

        if (ds->dst->index == info->tree_index && ds->index == info->sw_index)
                return dp->index == info->port;

        return false;
}

int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
                                  struct dsa_notifier_tag_8021q_vlan_info *info)
{
        struct dsa_port *dp;
        int err;

        /* Since we use dsa_broadcast(), there might be other switches in other
         * trees which don't support tag_8021q, so don't return an error.
         * Or they might even support tag_8021q but have not registered yet to
         * use it (maybe they use another tagger currently).
         */
        if (!ds->ops->tag_8021q_vlan_add || !ds->tag_8021q_ctx)
                return 0;

        dsa_switch_for_each_port(dp, ds) {
                if (dsa_port_tag_8021q_vlan_match(dp, info)) {
                        u16 flags = 0;

                        if (dsa_port_is_user(dp))
                                flags |= BRIDGE_VLAN_INFO_UNTAGGED;

                        if (vid_is_dsa_8021q_rxvlan(info->vid) &&
                            dsa_8021q_rx_switch_id(info->vid) == ds->index &&
                            dsa_8021q_rx_source_port(info->vid) == dp->index)
                                flags |= BRIDGE_VLAN_INFO_PVID;

                        err = dsa_port_do_tag_8021q_vlan_add(dp, info->vid,
                                                             flags);
                        if (err)
                                return err;
                }
        }

        return 0;
}

int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
                                  struct dsa_notifier_tag_8021q_vlan_info *info)
{
        struct dsa_port *dp;
        int err;

        if (!ds->ops->tag_8021q_vlan_del || !ds->tag_8021q_ctx)
                return 0;

        dsa_switch_for_each_port(dp, ds) {
                if (dsa_port_tag_8021q_vlan_match(dp, info)) {
                        err = dsa_port_do_tag_8021q_vlan_del(dp, info->vid);
                        if (err)
                                return err;
                }
        }

        return 0;
}

/* RX VLAN tagging (left) and TX VLAN tagging (right) setup shown for a single
 * front-panel switch port (here swp0).
 *
 * Port identification through VLAN (802.1Q) tags has different requirements
 * for it to work effectively:
 *  - On RX (ingress from network): each front-panel port must have a pvid
 *    that uniquely identifies it, and the egress of this pvid must be tagged
 *    towards the CPU port, so that software can recover the source port based
 *    on the VID in the frame. But this would only work for standalone ports;
 *    if bridged, this VLAN setup would break autonomous forwarding and would
 *    force all switched traffic to pass through the CPU. So we must also make
 *    the other front-panel ports members of this VID we're adding, albeit
 *    we're not making it their PVID (they'll still have their own).
 *  - On TX (ingress from CPU and towards network) we are faced with a problem.
 *    If we were to tag traffic (from within DSA) with the port's pvid, all
 *    would be well, assuming the switch ports were standalone. Frames would
 *    have no choice but to be directed towards the correct front-panel port.
 *    But because we also want the RX VLAN to not break bridging, then
 *    inevitably that means that we have to give them a choice (of what
 *    front-panel port to go out on), and therefore we cannot steer traffic
 *    based on the RX VID. So what we do is simply install one more VID on the
 *    front-panel and CPU ports, and profit off of the fact that steering will
 *    work just by virtue of the fact that there is only one other port that's
 *    a member of the VID we're tagging the traffic with - the desired one.
 *
 * So at the end, each front-panel port will have one RX VID (also the PVID),
 * the RX VID of all other front-panel ports that are in the same bridge, and
 * one TX VID. Whereas the CPU port will have the RX and TX VIDs of all
 * front-panel ports, and on top of that, is also tagged-input and
 * tagged-output (VLAN trunk).
 *
 *              CPU port                               CPU port
 * +-------------+-----+-------------+      +-------------+-----+-------------+
 * |  RX VID     |     |             |      |  TX VID     |     |             |
 * |  of swp0    |     |             |      |  of swp0    |     |             |
 * |             +-----+             |      |             +-----+             |
 * |                ^ T              |      |                | Tagged         |
 * |                |                |      |                | ingress        |
 * |    +-------+---+---+-------+    |      |    +-----------+                |
 * |    |       |       |       |    |      |    | Untagged                   |
 * |    |     U v     U v     U v    |      |    v egress                     |
 * | +-----+ +-----+ +-----+ +-----+ |      | +-----+ +-----+ +-----+ +-----+ |
 * | |     | |     | |     | |     | |      | |     | |     | |     | |     | |
 * | |PVID | |     | |     | |     | |      | |     | |     | |     | |     | |
 * +-+-----+-+-----+-+-----+-+-----+-+      +-+-----+-+-----+-+-----+-+-----+-+
 *   swp0    swp1    swp2    swp3            swp0    swp1    swp2    swp3
 */
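
/* To make the picture above concrete (illustrative numbers, assuming a single
 * switch with index 0 whose four user ports swp0..swp3 are bridged together):
 * swp0 ends up with RX VID 0x400 (1024) as its pvid, is a plain member of the
 * RX VIDs of swp1..swp3 (0x401..0x403), and has TX VID 0x800 (2048). The CPU
 * port is a tagged member of all eight VIDs (0x400..0x403 and 0x800..0x803).
 */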

static bool
dsa_port_tag_8021q_bridge_match(struct dsa_port *dp,
                                struct dsa_notifier_bridge_info *info)
{
        /* Don't match on self */
        if (dp->ds->dst->index == info->tree_index &&
            dp->ds->index == info->sw_index &&
            dp->index == info->port)
                return false;

        if (dsa_port_is_user(dp))
                return dp->bridge_dev == info->br;

        return false;
}

int dsa_tag_8021q_bridge_join(struct dsa_switch *ds,
                              struct dsa_notifier_bridge_info *info)
{
        struct dsa_switch *targeted_ds;
        struct dsa_port *targeted_dp;
        struct dsa_port *dp;
        u16 targeted_rx_vid;
        int err;

        if (!ds->tag_8021q_ctx)
                return 0;

        targeted_ds = dsa_switch_find(info->tree_index, info->sw_index);
        targeted_dp = dsa_to_port(targeted_ds, info->port);
        targeted_rx_vid = dsa_tag_8021q_rx_vid(targeted_dp);

        dsa_switch_for_each_port(dp, ds) {
                u16 rx_vid = dsa_tag_8021q_rx_vid(dp);

                if (!dsa_port_tag_8021q_bridge_match(dp, info))
                        continue;

                /* Install the RX VID of the targeted port in our VLAN table */
                err = dsa_port_tag_8021q_vlan_add(dp, targeted_rx_vid, true);
                if (err)
                        return err;

                /* Install our RX VID into the targeted port's VLAN table */
                err = dsa_port_tag_8021q_vlan_add(targeted_dp, rx_vid, true);
                if (err)
                        return err;
        }

        return 0;
}

int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds,
                               struct dsa_notifier_bridge_info *info)
{
        struct dsa_switch *targeted_ds;
        struct dsa_port *targeted_dp;
        struct dsa_port *dp;
        u16 targeted_rx_vid;

        if (!ds->tag_8021q_ctx)
                return 0;

        targeted_ds = dsa_switch_find(info->tree_index, info->sw_index);
        targeted_dp = dsa_to_port(targeted_ds, info->port);
        targeted_rx_vid = dsa_tag_8021q_rx_vid(targeted_dp);

        dsa_switch_for_each_port(dp, ds) {
                u16 rx_vid = dsa_tag_8021q_rx_vid(dp);

                if (!dsa_port_tag_8021q_bridge_match(dp, info))
                        continue;

                /* Remove the RX VID of the targeted port from our VLAN table */
                dsa_port_tag_8021q_vlan_del(dp, targeted_rx_vid, true);

                /* Remove our RX VID from the targeted port's VLAN table */
                dsa_port_tag_8021q_vlan_del(targeted_dp, rx_vid, true);
        }

        return 0;
}

int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port,
                                        struct net_device *br,
                                        int bridge_num)
{
        u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num);

        return dsa_port_tag_8021q_vlan_add(dsa_to_port(ds, port), tx_vid,
                                           true);
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_tx_fwd_offload);

void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
                                           struct net_device *br,
                                           int bridge_num)
{
        u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num);

        dsa_port_tag_8021q_vlan_del(dsa_to_port(ds, port), tx_vid, true);
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_tx_fwd_unoffload);
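
/* Usage sketch, not mandated by this file: the two helpers above share the
 * signature of the DSA .port_bridge_tx_fwd_offload / .port_bridge_tx_fwd_unoffload
 * ops of this kernel generation, so a tag_8021q user (hypothetical "foo"
 * driver, names illustrative) can typically wire them in directly:
 *
 *      static const struct dsa_switch_ops foo_switch_ops = {
 *              ...
 *              .port_bridge_tx_fwd_offload   = dsa_tag_8021q_bridge_tx_fwd_offload,
 *              .port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload,
 *      };
 *
 * after which the tagger may transmit data plane packets towards that bridge
 * using the VID returned by dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num).
 */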

/* Set up a port's tag_8021q RX and TX VLAN for standalone mode operation */
static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port)
{
        struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
        struct dsa_port *dp = dsa_to_port(ds, port);
        u16 rx_vid = dsa_tag_8021q_rx_vid(dp);
        u16 tx_vid = dsa_tag_8021q_tx_vid(dp);
        struct net_device *master;
        int err;

        /* The CPU port is implicitly configured by
         * configuring the front-panel ports
         */
        if (!dsa_port_is_user(dp))
                return 0;

        master = dp->cpu_dp->master;

        /* Add this user port's RX VID to the membership list of all others
         * (including itself). This is so that bridging will not be hindered.
         * L2 forwarding rules still take precedence when there are no VLAN
         * restrictions, so there are no concerns about leaking traffic.
         */
        err = dsa_port_tag_8021q_vlan_add(dp, rx_vid, false);
        if (err) {
                dev_err(ds->dev,
                        "Failed to apply RX VID %d to port %d: %pe\n",
                        rx_vid, port, ERR_PTR(err));
                return err;
        }

        /* Add @rx_vid to the master's RX filter. */
        vlan_vid_add(master, ctx->proto, rx_vid);

        /* Finally apply the TX VID on this port and on the CPU port */
        err = dsa_port_tag_8021q_vlan_add(dp, tx_vid, false);
        if (err) {
                dev_err(ds->dev,
                        "Failed to apply TX VID %d on port %d: %pe\n",
                        tx_vid, port, ERR_PTR(err));
                return err;
        }

        return err;
}

static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port)
{
        struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
        struct dsa_port *dp = dsa_to_port(ds, port);
        u16 rx_vid = dsa_tag_8021q_rx_vid(dp);
        u16 tx_vid = dsa_tag_8021q_tx_vid(dp);
        struct net_device *master;

        /* The CPU port is implicitly configured by
         * configuring the front-panel ports
         */
        if (!dsa_port_is_user(dp))
                return;

        master = dp->cpu_dp->master;

        dsa_port_tag_8021q_vlan_del(dp, rx_vid, false);

        vlan_vid_del(master, ctx->proto, rx_vid);

        dsa_port_tag_8021q_vlan_del(dp, tx_vid, false);
}

static int dsa_tag_8021q_setup(struct dsa_switch *ds)
{
        int err, port;

        ASSERT_RTNL();

        for (port = 0; port < ds->num_ports; port++) {
                err = dsa_tag_8021q_port_setup(ds, port);
                if (err < 0) {
                        dev_err(ds->dev,
                                "Failed to setup VLAN tagging for port %d: %pe\n",
                                port, ERR_PTR(err));
                        return err;
                }
        }

        return 0;
}

static void dsa_tag_8021q_teardown(struct dsa_switch *ds)
{
        int port;

        ASSERT_RTNL();

        for (port = 0; port < ds->num_ports; port++)
                dsa_tag_8021q_port_teardown(ds, port);
}

int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto)
{
        struct dsa_8021q_context *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->proto = proto;
        ctx->ds = ds;

        INIT_LIST_HEAD(&ctx->vlans);

        ds->tag_8021q_ctx = ctx;

        return dsa_tag_8021q_setup(ds);
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_register);

void dsa_tag_8021q_unregister(struct dsa_switch *ds)
{
        struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
        struct dsa_tag_8021q_vlan *v, *n;

        dsa_tag_8021q_teardown(ds);

        list_for_each_entry_safe(v, n, &ctx->vlans, list) {
                list_del(&v->list);
                kfree(v);
        }

        ds->tag_8021q_ctx = NULL;

        kfree(ctx);
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_unregister);
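
/* Usage sketch for a driver (hypothetical "foo" names; ETH_P_8021Q is shown
 * purely for illustration - the proto argument selects the TPID under which
 * the VIDs are installed on the DSA master). Registration runs under rtnl,
 * cf. the ASSERT_RTNL() in dsa_tag_8021q_setup(), and teardown is symmetric:
 *
 *      static int foo_setup(struct dsa_switch *ds)
 *      {
 *              int err;
 *
 *              ...
 *              rtnl_lock();
 *              err = dsa_tag_8021q_register(ds, htons(ETH_P_8021Q));
 *              rtnl_unlock();
 *
 *              return err;
 *      }
 *
 *      static void foo_teardown(struct dsa_switch *ds)
 *      {
 *              rtnl_lock();
 *              dsa_tag_8021q_unregister(ds);
 *              rtnl_unlock();
 *              ...
 *      }
 */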

struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
                               u16 tpid, u16 tci)
{
        /* skb->data points at skb_mac_header, which
         * is fine for vlan_insert_tag.
         */
        return vlan_insert_tag(skb, htons(tpid), tci);
}
EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
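
/* Sketch of how a tagger's transmit path would typically use the helper above
 * (hypothetical tagger "foo"; the PCP derivation is illustrative only):
 *
 *      static struct sk_buff *foo_xmit(struct sk_buff *skb,
 *                                      struct net_device *netdev)
 *      {
 *              struct dsa_port *dp = dsa_slave_to_port(netdev);
 *              u16 tx_vid = dsa_tag_8021q_tx_vid(dp);
 *              u16 queue_mapping = skb_get_queue_mapping(skb);
 *              u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
 *
 *              return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q,
 *                                    ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
 *      }
 */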

void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id)
{
        u16 vid, tci;

        skb_push_rcsum(skb, ETH_HLEN);
        if (skb_vlan_tag_present(skb)) {
                tci = skb_vlan_tag_get(skb);
                __vlan_hwaccel_clear_tag(skb);
        } else {
                __skb_vlan_pop(skb, &tci);
        }
        skb_pull_rcsum(skb, ETH_HLEN);

        vid = tci & VLAN_VID_MASK;

        *source_port = dsa_8021q_rx_source_port(vid);
        *switch_id = dsa_8021q_rx_switch_id(vid);
        skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rcv);
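
/* Sketch of the receive-side counterpart in a tagger (hypothetical "foo";
 * surrounding tagger boilerplate elided), showing how the decoded switch and
 * port IDs are meant to be consumed:
 *
 *      static struct sk_buff *foo_rcv(struct sk_buff *skb,
 *                                     struct net_device *netdev)
 *      {
 *              int src_port, switch_id;
 *
 *              dsa_8021q_rcv(skb, &src_port, &switch_id);
 *
 *              skb->dev = dsa_master_find_slave(netdev, switch_id, src_port);
 *              if (!skb->dev)
 *                      return NULL;
 *
 *              return skb;
 *      }
 */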