// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAN bus driver for the MSCAN controller, kept as generic as possible.
 *
 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
 *                         Varma Electronics Oy
 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
 * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/io.h>

#include "mscan.h"

static const struct can_bittiming_const mscan_bittiming_const = {
	.name = "mscan",
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 64,
	.brp_inc = 1,
};

struct mscan_state {
	u8 mode;
	u8 canrier;
	u8 cantier;
};

static enum can_state state_map[] = {
	CAN_STATE_ERROR_ACTIVE,
	CAN_STATE_ERROR_WARNING,
	CAN_STATE_ERROR_PASSIVE,
	CAN_STATE_BUS_OFF
};

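/*
 * Request a controller mode change (sleep and/or init) and wait for the
 * corresponding acknowledge bits in CANCTL1, polling up to
 * MSCAN_SET_MODE_RETRIES times.  Passing MSCAN_NORMAL_MODE clears the
 * request bits again and waits until the controller has left sleep/init.
 */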
static int mscan_set_mode(struct net_device *dev, u8 mode)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	int ret = 0;
	int i;
	u8 canctl1;

	if (mode != MSCAN_NORMAL_MODE) {
		if (priv->tx_active) {
			/* Abort transfers before going to sleep */
			out_8(&regs->cantarq, priv->tx_active);
			/* Suppress TX done interrupts */
			out_8(&regs->cantier, 0);
		}

		canctl1 = in_8(&regs->canctl1);
		if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) {
			setbits8(&regs->canctl0, MSCAN_SLPRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				if (in_8(&regs->canctl1) & MSCAN_SLPAK)
					break;
				udelay(100);
			}
			/*
			 * The MSCAN controller will fail to enter sleep mode
			 * while there is irregular activity on the bus, e.g.
			 * if another node keeps retransmitting. This behavior
			 * is undocumented and seems to differ between the
			 * MSCAN built into the MPC5200B and the MPC5200. We
			 * proceed in that case, since otherwise SLPRQ would
			 * stay set and the controller would get stuck.
			 * NOTE: INITRQ or CSWAI will abort any still active
			 * transmit actions at once.
			 */
			if (i >= MSCAN_SET_MODE_RETRIES)
				netdev_dbg(dev, "device failed to enter sleep mode, we proceed anyhow\n");
			else
				priv->can.state = CAN_STATE_SLEEPING;
		}

		if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) {
			setbits8(&regs->canctl0, MSCAN_INITRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				if (in_8(&regs->canctl1) & MSCAN_INITAK)
					break;
			}
			if (i >= MSCAN_SET_MODE_RETRIES)
				ret = -ENODEV;
		}
		if (!ret)
			priv->can.state = CAN_STATE_STOPPED;

		if (mode & MSCAN_CSWAI)
			setbits8(&regs->canctl0, MSCAN_CSWAI);

	} else {
		canctl1 = in_8(&regs->canctl1);
		if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
			clrbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				canctl1 = in_8(&regs->canctl1);
				if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
					break;
			}
			if (i >= MSCAN_SET_MODE_RETRIES)
				ret = -ENODEV;
			else
				priv->can.state = CAN_STATE_ERROR_ACTIVE;
		}
	}
	return ret;
}

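/*
 * Bring the controller into normal (operational) mode: reset the driver's
 * TX bookkeeping, clear a pending bus-off hold on MPC5121, derive the
 * initial CAN state from the CANRFLG status flags and enable the receive
 * interrupts.
 */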
static int mscan_start(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u8 canrflg;
	int err;

	out_8(&regs->canrier, 0);

	INIT_LIST_HEAD(&priv->tx_head);
	priv->prev_buf_id = 0;
	priv->cur_pri = 0;
	priv->tx_active = 0;
	priv->shadow_canrier = 0;
	priv->flags = 0;

	if (priv->type == MSCAN_TYPE_MPC5121) {
		/* Clear pending bus-off condition */
		if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
			out_8(&regs->canmisc, MSCAN_BOHOLD);
	}

	err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
	if (err)
		return err;

	canrflg = in_8(&regs->canrflg);
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg),
				    MSCAN_STATE_TX(canrflg))];
	out_8(&regs->cantier, 0);

	/* Enable receive interrupts. */
	out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);

	return 0;
}

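/*
 * Restart after bus-off.  On MPC5121 the controller recovers once the
 * BOHOLD bit is cleared, so only the hold is released and the receive
 * interrupts are re-enabled; on the other variants the core is put back
 * into init mode and fully restarted via mscan_start().
 */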
static int mscan_restart(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);

	if (priv->type == MSCAN_TYPE_MPC5121) {
		struct mscan_regs __iomem *regs = priv->reg_base;

		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
		     "bus-off state expected\n");
		out_8(&regs->canmisc, MSCAN_BOHOLD);
		/* Re-enable receive interrupts. */
		out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
	} else {
		if (priv->can.state <= CAN_STATE_BUS_OFF)
			mscan_set_mode(dev, MSCAN_INIT_MODE);
		return mscan_start(dev);
	}

	return 0;
}

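/*
 * Queue a frame for transmission.  Free TX buffers are tracked in
 * priv->tx_active; since the controller prefers the buffer with the lower
 * number (see the comment in the "case 1" handling below), the driver
 * assigns an increasing local priority (TBPR) from cur_pri so that frames
 * leave the wire in submission order.  When cur_pri is about to wrap, the
 * queue is stopped until all pending buffers have been transmitted
 * (F_TX_WAIT_ALL).
 */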
static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	int i, rtr, buf_id;
	u32 can_id;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	out_8(&regs->cantier, 0);

	i = ~priv->tx_active & MSCAN_TXE;
	buf_id = ffs(i) - 1;
	switch (hweight8(i)) {
	case 0:
		netif_stop_queue(dev);
		netdev_err(dev, "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	case 1:
		/*
		 * If buf_id < 3, then the current frame will be sent out of
		 * order, since buffers with a lower id have higher priority
		 * (hell..)
		 */
		netif_stop_queue(dev);
		fallthrough;
	case 2:
		if (buf_id < priv->prev_buf_id) {
			priv->cur_pri++;
			if (priv->cur_pri == 0xff) {
				set_bit(F_TX_WAIT_ALL, &priv->flags);
				netif_stop_queue(dev);
			}
		}
		set_bit(F_TX_PROGRESS, &priv->flags);
		break;
	}
	priv->prev_buf_id = buf_id;
	out_8(&regs->cantbsel, i);

	rtr = frame->can_id & CAN_RTR_FLAG;

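	/*
	 * The ID registers are written as two big-endian 16-bit words
	 * (idr1_0 and idr3_2).  For extended frames the 29-bit ID is split
	 * around the flag bits (MSCAN_EFF_FLAGS), with the RTR bit ending up
	 * at MSCAN_EFF_RTR_SHIFT in idr3_2; for standard frames the 11-bit
	 * ID and the RTR bit at MSCAN_SFF_RTR_SHIFT fit entirely into
	 * idr1_0.
	 */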
	/* RTR is always the lowest bit of interest, then IDs follow */
	if (frame->can_id & CAN_EFF_FLAG) {
		can_id = (frame->can_id & CAN_EFF_MASK)
			 << (MSCAN_EFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_EFF_RTR_SHIFT;
		out_be16(&regs->tx.idr3_2, can_id);

		can_id >>= 16;
		/* EFF_FLAGS are between the IDs :( */
		can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
			 | MSCAN_EFF_FLAGS;
	} else {
		can_id = (frame->can_id & CAN_SFF_MASK)
			 << (MSCAN_SFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_SFF_RTR_SHIFT;
	}
	out_be16(&regs->tx.idr1_0, can_id);

	if (!rtr) {
		void __iomem *data = &regs->tx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		for (i = 0; i < frame->len / 2; i++) {
			out_be16(data, *payload++);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
		/* write remaining byte if necessary */
		if (frame->len & 1)
			out_8(data, frame->data[frame->len - 1]);
	}

	out_8(&regs->tx.dlr, frame->len);
	out_8(&regs->tx.tbpr, priv->cur_pri);

	/* Start transmission. */
	out_8(&regs->cantflg, 1 << buf_id);

	if (!test_bit(F_TX_PROGRESS, &priv->flags))
		netif_trans_update(dev);

	list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);

	can_put_echo_skb(skb, dev, buf_id, 0);

	/* Enable interrupt. */
	priv->tx_active |= 1 << buf_id;
	out_8(&regs->cantier, priv->tx_active);

	return NETDEV_TX_OK;
}

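/*
 * Map the RX/TX status fields of CANRFLG to the socketcan state when a
 * status change interrupt (CSCIF) is pending; the worse of the two values
 * determines the reported state.
 */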
static enum can_state get_new_state(struct net_device *dev, u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);

	if (unlikely(canrflg & MSCAN_CSCIF))
		return state_map[max(MSCAN_STATE_RX(canrflg),
				 MSCAN_STATE_TX(canrflg))];

	return priv->can.state;
}

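/*
 * Read one received frame from the RX buffer window: decode the ID
 * registers (the inverse of the packing done in mscan_start_xmit()), copy
 * the payload 16 bits at a time and release the buffer by acknowledging
 * the RXF flag.
 */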
static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u32 can_id;
	int i;

	can_id = in_be16(&regs->rx.idr1_0);
	if (can_id & (1 << 3)) {
		frame->can_id = CAN_EFF_FLAG;
		can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2));
		can_id = ((can_id & 0xffe00000) |
			  ((can_id & 0x7ffff) << 2)) >> 2;
	} else {
		can_id >>= 4;
		frame->can_id = 0;
	}

	frame->can_id |= can_id >> 1;
	if (can_id & 1)
		frame->can_id |= CAN_RTR_FLAG;

	frame->len = can_cc_dlc2len(in_8(&regs->rx.dlr) & 0xf);

	if (!(frame->can_id & CAN_RTR_FLAG)) {
		void __iomem *data = &regs->rx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		for (i = 0; i < frame->len / 2; i++) {
			*payload++ = in_be16(data);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
		/* read remaining byte if necessary */
		if (frame->len & 1)
			frame->data[frame->len - 1] = in_8(data);
	}

	out_8(&regs->canrflg, MSCAN_RXF);
}

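/*
 * Build an error frame from the pending error/status flags: report RX
 * overruns, let can_change_state() account for warning/passive/bus-off
 * transitions, and on bus-off stop the non-MPC5121 cores from recovering
 * on their own (see the comment below).  The handled flags are acknowledged
 * in CANRFLG at the end.
 */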
static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
				u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	enum can_state new_state;

	netdev_dbg(dev, "error interrupt (canrflg=%#x)\n", canrflg);
	frame->can_id = CAN_ERR_FLAG;

	if (canrflg & MSCAN_OVRIF) {
		frame->can_id |= CAN_ERR_CRTL;
		frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		stats->rx_errors++;
	} else {
		frame->data[1] = 0;
	}

	new_state = get_new_state(dev, canrflg);
	if (new_state != priv->can.state) {
		can_change_state(dev, frame,
				 state_map[MSCAN_STATE_TX(canrflg)],
				 state_map[MSCAN_STATE_RX(canrflg)]);

		if (priv->can.state == CAN_STATE_BUS_OFF) {
			/*
			 * The MSCAN on the MPC5200 recovers from bus-off
			 * automatically. To avoid that, we stop the chip
			 * with a light-weight stop (we are in irq context).
			 */
			if (priv->type != MSCAN_TYPE_MPC5121) {
				out_8(&regs->cantier, 0);
				out_8(&regs->canrier, 0);
				setbits8(&regs->canctl0,
					 MSCAN_SLPRQ | MSCAN_INITRQ);
			}
			can_bus_off(dev);
		}
	}
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	frame->len = CAN_ERR_DLC;
	out_8(&regs->canrflg, MSCAN_ERR_IF);
}

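/*
 * NAPI poll handler: drain received frames and error events from CANRFLG
 * until the quota is exhausted or nothing is pending, then re-enable the
 * receive interrupts that were masked in the hard interrupt handler.
 */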
static int mscan_rx_poll(struct napi_struct *napi, int quota)
{
	struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi);
	struct net_device *dev = napi->dev;
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	int work_done = 0;
	struct sk_buff *skb;
	struct can_frame *frame;
	u8 canrflg;

	while (work_done < quota) {
		canrflg = in_8(&regs->canrflg);
		if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
			break;

		skb = alloc_can_skb(dev, &frame);
		if (!skb) {
			if (printk_ratelimit())
				netdev_notice(dev, "packet dropped\n");
			stats->rx_dropped++;
			out_8(&regs->canrflg, canrflg);
			continue;
		}

		if (canrflg & MSCAN_RXF)
			mscan_get_rx_frame(dev, frame);
		else if (canrflg & MSCAN_ERR_IF)
			mscan_get_err_frame(dev, frame, canrflg);

		stats->rx_packets++;
		stats->rx_bytes += frame->len;
		work_done++;
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		if (likely(napi_complete_done(&priv->napi, work_done))) {
			clear_bit(F_RX_PROGRESS, &priv->flags);
			if (priv->can.state < CAN_STATE_BUS_OFF)
				out_8(&regs->canrier, priv->shadow_canrier);
		}
	}
	return work_done;
}

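/*
 * Hard interrupt handler.  Completed TX buffers are matched against the
 * tx_head list and their echo skbs are released; the queue is woken up
 * again unless the driver is waiting for all buffers to drain.  RX and
 * error conditions only mask the receive interrupts and schedule the NAPI
 * poll loop above.
 */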
static irqreturn_t mscan_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	u8 cantier, cantflg, canrflg;
	irqreturn_t ret = IRQ_NONE;

	cantier = in_8(&regs->cantier) & MSCAN_TXE;
	cantflg = in_8(&regs->cantflg) & cantier;

	if (cantier && cantflg) {
		struct list_head *tmp, *pos;

		list_for_each_safe(pos, tmp, &priv->tx_head) {
			struct tx_queue_entry *entry =
			    list_entry(pos, struct tx_queue_entry, list);
			u8 mask = entry->mask;

			if (!(cantflg & mask))
				continue;

			out_8(&regs->cantbsel, mask);
			stats->tx_bytes += in_8(&regs->tx.dlr);
			stats->tx_packets++;
			can_get_echo_skb(dev, entry->id, NULL);
			priv->tx_active &= ~mask;
			list_del(pos);
		}

		if (list_empty(&priv->tx_head)) {
			clear_bit(F_TX_WAIT_ALL, &priv->flags);
			clear_bit(F_TX_PROGRESS, &priv->flags);
			priv->cur_pri = 0;
		} else {
			netif_trans_update(dev);
		}

		if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
			netif_wake_queue(dev);

		out_8(&regs->cantier, priv->tx_active);
		ret = IRQ_HANDLED;
	}

	canrflg = in_8(&regs->canrflg);
	if ((canrflg & ~MSCAN_STAT_MSK) &&
	    !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) {
		if (canrflg & ~MSCAN_STAT_MSK) {
			priv->shadow_canrier = in_8(&regs->canrier);
			out_8(&regs->canrier, 0);
			napi_schedule(&priv->napi);
			ret = IRQ_HANDLED;
		} else {
			clear_bit(F_RX_PROGRESS, &priv->flags);
		}
	}
	return ret;
}

static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
{
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		ret = mscan_restart(dev);
		if (ret)
			break;
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}
	return ret;
}

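/*
 * Program the bit timing registers.  The BTR0/BTR1 helper macros from
 * mscan.h pack the prescaler and SJW into BTR0 and TSEG1/TSEG2 plus the
 * triple-sampling bit into BTR1 (presumably in the usual SJA1000-like
 * "value minus one" encoding); the computed register values are logged for
 * reference.
 */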
static int mscan_do_set_bittiming(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct can_bittiming *bt = &priv->can.bittiming;
	u8 btr0, btr1;

	btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw);
	btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) |
		BTR1_SET_TSEG2(bt->phase_seg2) |
		BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES));

	netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);

	out_8(&regs->canbtr0, btr0);
	out_8(&regs->canbtr1, btr1);

	return 0;
}

static int mscan_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	bec->txerr = in_8(&regs->cantxerr);
	bec->rxerr = in_8(&regs->canrxerr);

	return 0;
}

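/*
 * ndo_open: enable the IPG and CAN clocks, perform the common candev open,
 * enable NAPI, request the interrupt, apply listen-only mode if requested
 * and finally start the controller.  The error paths unwind in reverse
 * order.
 */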
static int mscan_open(struct net_device *dev)
{
	int ret;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	ret = clk_prepare_enable(priv->clk_ipg);
	if (ret)
		goto exit_retcode;
	ret = clk_prepare_enable(priv->clk_can);
	if (ret)
		goto exit_dis_ipg_clock;

	/* common open */
	ret = open_candev(dev);
	if (ret)
		goto exit_dis_can_clock;

	napi_enable(&priv->napi);

	ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev);
	if (ret < 0) {
		netdev_err(dev, "failed to attach interrupt\n");
		goto exit_napi_disable;
	}

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		setbits8(&regs->canctl1, MSCAN_LISTEN);
	else
		clrbits8(&regs->canctl1, MSCAN_LISTEN);

	ret = mscan_start(dev);
	if (ret)
		goto exit_free_irq;

	netif_start_queue(dev);

	return 0;

exit_free_irq:
	free_irq(dev->irq, dev);
exit_napi_disable:
	napi_disable(&priv->napi);
	close_candev(dev);
exit_dis_can_clock:
	clk_disable_unprepare(priv->clk_can);
exit_dis_ipg_clock:
	clk_disable_unprepare(priv->clk_ipg);
exit_retcode:
	return ret;
}

static int mscan_close(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	out_8(&regs->cantier, 0);
	out_8(&regs->canrier, 0);
	mscan_set_mode(dev, MSCAN_INIT_MODE);
	close_candev(dev);
	free_irq(dev->irq, dev);

	clk_disable_unprepare(priv->clk_can);
	clk_disable_unprepare(priv->clk_ipg);

	return 0;
}

static const struct net_device_ops mscan_netdev_ops = {
	.ndo_open	= mscan_open,
	.ndo_stop	= mscan_close,
	.ndo_start_xmit	= mscan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};

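/*
 * Final hardware setup before registration: select the clock source, enable
 * bus-off recovery upon request on MPC5121 (MSCAN_BORM), enable the module
 * (MSCAN_CANE), open the acceptance filters completely (two 32-bit filters
 * with all mask bits set) and leave the chip in init mode until the
 * interface is brought up.
 */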
int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u8 ctl1;

	ctl1 = in_8(&regs->canctl1);
	if (mscan_clksrc)
		ctl1 |= MSCAN_CLKSRC;
	else
		ctl1 &= ~MSCAN_CLKSRC;

	if (priv->type == MSCAN_TYPE_MPC5121) {
		priv->can.do_get_berr_counter = mscan_get_berr_counter;
		ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
	}

	ctl1 |= MSCAN_CANE;
	out_8(&regs->canctl1, ctl1);
	udelay(100);

	/* acceptance mask/acceptance code (accept everything) */
	out_be16(&regs->canidar1_0, 0);
	out_be16(&regs->canidar3_2, 0);
	out_be16(&regs->canidar5_4, 0);
	out_be16(&regs->canidar7_6, 0);

	out_be16(&regs->canidmr1_0, 0xffff);
	out_be16(&regs->canidmr3_2, 0xffff);
	out_be16(&regs->canidmr5_4, 0xffff);
	out_be16(&regs->canidmr7_6, 0xffff);
	/* Two 32 bit Acceptance Filters */
	out_8(&regs->canidac, MSCAN_AF_32BIT);

	mscan_set_mode(dev, MSCAN_INIT_MODE);

	return register_candev(dev);
}

void unregister_mscandev(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	mscan_set_mode(dev, MSCAN_INIT_MODE);
	clrbits8(&regs->canctl1, MSCAN_CANE);
	unregister_candev(dev);
}

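/*
 * Allocate and pre-initialize the candev: reserve the echo skbs for the TX
 * buffers, hook up the netdev and CAN callbacks, advertise the supported
 * control modes and precompute the id/mask pair for every TX queue entry.
 */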
struct net_device *alloc_mscandev(void)
{
	struct net_device *dev;
	struct mscan_priv *priv;
	int i;

	dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX);
	if (!dev)
		return NULL;
	priv = netdev_priv(dev);

	dev->netdev_ops = &mscan_netdev_ops;

	dev->flags |= IFF_ECHO;	/* we support local echo */

	netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);

	priv->can.bittiming_const = &mscan_bittiming_const;
	priv->can.do_set_bittiming = mscan_do_set_bittiming;
	priv->can.do_set_mode = mscan_do_set_mode;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
		CAN_CTRLMODE_LISTENONLY;

	for (i = 0; i < TX_QUEUE_SIZE; i++) {
		priv->tx_queue[i].id = i;
		priv->tx_queue[i].mask = 1 << i;
	}

	return dev;
}

MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN port driver for MSCAN-based chips");