// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver ethtool interface
 *
 * Copyright (C) 2019 Texas Instruments
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>

#include "cpsw.h"
#include "cpts.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "davinci_cpdma.h"

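/*
 * Hardware statistics block. The field order mirrors the CPSW statistics
 * register block: cpsw_get_ethtool_stats() reads each counter with readl()
 * at the corresponding offset into this layout. __pad_0 skips two words
 * that are presumably reserved in the register map.
 */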
struct cpsw_hw_stats {
	u32	rxgoodframes;
	u32	rxbroadcastframes;
	u32	rxmulticastframes;
	u32	rxpauseframes;
	u32	rxcrcerrors;
	u32	rxaligncodeerrors;
	u32	rxoversizedframes;
	u32	rxjabberframes;
	u32	rxundersizedframes;
	u32	rxfragments;
	u32	__pad_0[2];
	u32	rxoctets;
	u32	txgoodframes;
	u32	txbroadcastframes;
	u32	txmulticastframes;
	u32	txpauseframes;
	u32	txdeferredframes;
	u32	txcollisionframes;
	u32	txsinglecollframes;
	u32	txmultcollframes;
	u32	txexcessivecollisions;
	u32	txlatecollisions;
	u32	txunderrun;
	u32	txcarriersenseerrors;
	u32	txoctets;
	u32	octetframes64;
	u32	octetframes65t127;
	u32	octetframes128t255;
	u32	octetframes256t511;
	u32	octetframes512t1023;
	u32	octetframes1024tup;
	u32	netoctets;
	u32	rxsofoverruns;
	u32	rxmofoverruns;
	u32	rxdmaoverruns;
};

struct cpsw_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

enum {
	CPSW_STATS,
	CPDMA_RX_STATS,
	CPDMA_TX_STATS,
};

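/*
 * Each of the macros below expands to three values -- the stats type,
 * sizeof_field() and offsetof() -- filling the .type, .sizeof_stat and
 * .stat_offset members of struct cpsw_stats in the tables that follow.
 */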
#define CPSW_STAT(m)		CPSW_STATS,				\
				sizeof_field(struct cpsw_hw_stats, m), \
				offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)	CPDMA_RX_STATS,				   \
				sizeof_field(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)	CPDMA_TX_STATS,				   \
				sizeof_field(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)

static const struct cpsw_stats cpsw_gstrings_stats[] = {
	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) },
	{ "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
	{ "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
	{ "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
	{ "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
	{ "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
	{ "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
	{ "Rx Jabbers", CPSW_STAT(rxjabberframes) },
	{ "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
	{ "Rx Fragments", CPSW_STAT(rxfragments) },
	{ "Rx Octets", CPSW_STAT(rxoctets) },
	{ "Good Tx Frames", CPSW_STAT(txgoodframes) },
	{ "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
	{ "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
	{ "Pause Tx Frames", CPSW_STAT(txpauseframes) },
	{ "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
	{ "Collisions", CPSW_STAT(txcollisionframes) },
	{ "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
	{ "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
	{ "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
	{ "Late Collisions", CPSW_STAT(txlatecollisions) },
	{ "Tx Underrun", CPSW_STAT(txunderrun) },
	{ "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
	{ "Tx Octets", CPSW_STAT(txoctets) },
	{ "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
	{ "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
	{ "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
	{ "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
	{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
	{ "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
	{ "Net Octets", CPSW_STAT(netoctets) },
	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
};

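/*
 * One table describes both rx and tx CPDMA channels; the direction prefix
 * ("Rx"/"Tx") is added per channel by cpsw_add_ch_strings().
 */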
static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
	{ "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
	{ "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
	{ "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
	{ "misqueued", CPDMA_RX_STAT(misqueued) },
	{ "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
	{ "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
	{ "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
	{ "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
	{ "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
	{ "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
	{ "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
	{ "requeue", CPDMA_RX_STAT(requeue) },
	{ "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
};

#define CPSW_STATS_COMMON_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
#define CPSW_STATS_CH_LEN	ARRAY_SIZE(cpsw_gstrings_ch_stats)

u32 cpsw_get_msglevel(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}

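/*
 * Interrupt coalescing: a single pacing interval is supported, exposed
 * through rx_coalesce_usecs and applied to both the rx and tx directions,
 * e.g.:
 *
 *	ethtool -C eth0 rx-usecs 500
 */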
int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
		      struct kernel_ethtool_coalesce *kernel_coal,
		      struct netlink_ext_ack *extack)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	coal->rx_coalesce_usecs = cpsw->coal_intvl;
	return 0;
}

int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
		      struct kernel_ethtool_coalesce *kernel_coal,
		      struct netlink_ext_ack *extack)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 int_ctrl;
	u32 num_interrupts = 0;
	u32 prescale = 0;
	u32 addnl_dvdr = 1;
	u32 coal_intvl = 0;
	struct cpsw_common *cpsw = priv->cpsw;

	coal_intvl = coal->rx_coalesce_usecs;

	int_ctrl = readl(&cpsw->wr_regs->int_control);
	prescale = cpsw->bus_freq_mhz * 4;

	if (!coal->rx_coalesce_usecs) {
		int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
		goto update_return;
	}

	if (coal_intvl < CPSW_CMINTMIN_INTVL)
		coal_intvl = CPSW_CMINTMIN_INTVL;

	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
		/* Interrupt pacer works with a 4us pulse; we can
		 * throttle further by dilating the 4us pulse.
		 */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
				coal_intvl = (CPSW_CMINTMAX_INTVL
						* addnl_dvdr);
		} else {
			addnl_dvdr = 1;
			coal_intvl = CPSW_CMINTMAX_INTVL;
		}
	}

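	/*
	 * rx_imax/tx_imax hold the pacing target in interrupts per
	 * millisecond: e.g. coal_intvl = 250 usecs gives 1000 / 250 = 4
	 * interrupts/ms. For intervals beyond CPSW_CMINTMAX_INTVL the 4 usec
	 * pulse was dilated by addnl_dvdr above, which the numerator
	 * compensates for.
	 */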
	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
	writel(num_interrupts, &cpsw->wr_regs->rx_imax);
	writel(num_interrupts, &cpsw->wr_regs->tx_imax);

	int_ctrl |= CPSW_INTPACEEN;
	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);

update_return:
	writel(int_ctrl, &cpsw->wr_regs->int_control);

	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
	cpsw->coal_intvl = coal_intvl;

	return 0;
}

int cpsw_get_sset_count(struct net_device *ndev, int sset)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	switch (sset) {
	case ETH_SS_STATS:
		return (CPSW_STATS_COMMON_LEN +
		       (cpsw->rx_ch_num + cpsw->tx_ch_num) *
		       CPSW_STATS_CH_LEN);
	default:
		return -EOPNOTSUPP;
	}
}

static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
{
	int ch_stats_len;
	int line;
	int i;

	ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
	for (i = 0; i < ch_stats_len; i++) {
		line = i % CPSW_STATS_CH_LEN;
		snprintf(*p, ETH_GSTRING_LEN,
			 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
			 (long)(i / CPSW_STATS_CH_LEN),
			 cpsw_gstrings_ch_stats[line].stat_string);
		*p += ETH_GSTRING_LEN;
	}
}

void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
			memcpy(p, cpsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
		cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
		break;
	}
}

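/*
 * Fill the stats buffer in the same order cpsw_get_strings() emits the
 * labels: the CPSW_STATS_COMMON_LEN hardware counters first, then
 * CPSW_STATS_CH_LEN entries per rx channel, then per tx channel.
 */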
void cpsw_get_ethtool_stats(struct net_device *ndev,
			    struct ethtool_stats *stats, u64 *data)
{
	u8 *p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpdma_chan_stats ch_stats;
	int i, l, ch;

	/* Collect CPSW hardware statistics from the stats register block */
	for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
		data[l] = readl(cpsw->hw_stats +
				cpsw_gstrings_stats[l].stat_offset);

	/* Collect Davinci CPDMA stats for each rx, then each tx channel */
	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}

	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}
}

void cpsw_get_pauseparam(struct net_device *ndev,
			 struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	pause->autoneg = AUTONEG_DISABLE;
	pause->rx_pause = priv->rx_pause ? true : false;
	pause->tx_pause = priv->tx_pause ? true : false;
}

void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	wol->supported = 0;
	wol->wolopts = 0;

	if (cpsw->slaves[slave_no].phy)
		phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
}

int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
	else
		return -EOPNOTSUPP;
}

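/*
 * The ethtool register dump (ethtool -d) exposes the ALE table:
 * cpsw_get_regs_len() sizes the buffer and cpsw_get_regs() fills it via
 * cpsw_ale_dump().
 */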
int cpsw_get_regs_len(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	return cpsw_ale_get_num_entries(cpsw->ale) *
	       ALE_ENTRY_WORDS * sizeof(u32);
}

void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p)
{
	u32 *reg = p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	/* update CPSW IP version */
	regs->version = cpsw->version;

	cpsw_ale_dump(cpsw->ale, reg);
}

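/*
 * The ->begin()/->complete() ethtool hooks bracket every ethtool operation
 * with a runtime-PM reference so the hardware stays powered while its
 * registers are accessed.
 */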
int cpsw_ethtool_op_begin(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
		pm_runtime_put_noidle(cpsw->dev);
	}

	return ret;
}

void cpsw_ethtool_op_complete(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_put(priv->cpsw->dev);
	if (ret < 0)
		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
}

void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_combined = 0;
	ch->max_other = 0;
	ch->other_count = 0;
	ch->rx_count = cpsw->rx_ch_num;
	ch->tx_count = cpsw->tx_ch_num;
	ch->combined_count = 0;
}

int cpsw_get_link_ksettings(struct net_device *ndev,
			    struct ethtool_link_ksettings *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;

	phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
	return 0;
}

int cpsw_set_link_ksettings(struct net_device *ndev,
			    const struct ethtool_link_ksettings *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;

	return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd);
}

int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}

int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}

int cpsw_nway_reset(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
	else
		return -EOPNOTSUPP;
}

static void cpsw_suspend_data_pass(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int i;

	/* Disable NAPI scheduling */
	cpsw_intr_disable(cpsw);

	/* Stop all transmit queues for every network device */
	for (i = 0; i < cpsw->data.slaves; i++) {
		ndev = cpsw->slaves[i].ndev;
		if (!(ndev && netif_running(ndev)))
			continue;

		netif_tx_stop_all_queues(ndev);

		/* Barrier, so that the stopped queues are visible to other CPUs */
		smp_mb__after_atomic();
	}

	/* Handle the rest of the tx packets and stop the cpdma channels */
	cpdma_ctlr_stop(cpsw->dma);
}

static int cpsw_resume_data_pass(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int i, ret;

	/* Refill rx channels and restart reception if the hardware is in use */
	if (cpsw->usage_count) {
		ret = cpsw_fill_rx_channels(priv);
		if (ret)
			return ret;

		cpdma_ctlr_start(cpsw->dma);
		cpsw_intr_enable(cpsw);
	}

	/* Resume transmit for every affected interface */
	for (i = 0; i < cpsw->data.slaves; i++) {
		ndev = cpsw->slaves[i].ndev;
		if (ndev && netif_running(ndev))
			netif_tx_start_all_queues(ndev);
	}

	return 0;
}

static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
				  struct ethtool_channels *ch)
{
	if (cpsw->quirk_irq) {
		dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed\n");
		return -EOPNOTSUPP;
	}

	if (ch->combined_count)
		return -EINVAL;

	/* verify we have at least one channel in each direction */
	if (!ch->rx_count || !ch->tx_count)
		return -EINVAL;

	if (ch->rx_count > cpsw->data.channels ||
	    ch->tx_count > cpsw->data.channels)
		return -EINVAL;

	return 0;
}

static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx,
				    cpdma_handler_fn rx_handler)
{
	struct cpsw_common *cpsw = priv->cpsw;
	void (*handler)(void *, int, int);
	struct netdev_queue *queue;
	struct cpsw_vector *vec;
	int ret, *ch, vch;

	if (rx) {
		ch = &cpsw->rx_ch_num;
		vec = cpsw->rxv;
		handler = rx_handler;
	} else {
		ch = &cpsw->tx_ch_num;
		vec = cpsw->txv;
		handler = cpsw_tx_handler;
	}

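	/*
	 * Grow or shrink the channel count to match the request: rx channels
	 * map directly onto the hardware channels counting up from 0, while
	 * tx channels are mapped in reverse (vch = 7 - *ch), so tx queue 0
	 * lands on the highest-numbered of the 8 CPDMA channels.
	 */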
	while (*ch < ch_num) {
		vch = rx ? *ch : 7 - *ch;
		vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
		queue = netdev_get_tx_queue(priv->ndev, *ch);
		queue->tx_maxrate = 0;

		if (IS_ERR(vec[*ch].ch))
			return PTR_ERR(vec[*ch].ch);

		if (!vec[*ch].ch)
			return -EINVAL;

		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
		(*ch)++;
	}

	while (*ch > ch_num) {
		(*ch)--;

		ret = cpdma_chan_destroy(vec[*ch].ch);
		if (ret)
			return ret;

		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
	}

	return 0;
}

static void cpsw_fail(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i;

	for (i = 0; i < cpsw->data.slaves; i++) {
		ndev = cpsw->slaves[i].ndev;
		if (ndev)
			dev_close(ndev);
	}
}

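/*
 * Change the number of rx/tx channels at runtime (ethtool -L eth0 rx N
 * tx M): traffic is quiesced, channels are created or destroyed to match
 * the request, the stack is told the new queue counts, and data flow is
 * resumed. On any failure the affected devices are closed.
 */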
int cpsw_set_channels_common(struct net_device *ndev,
			     struct ethtool_channels *chs,
			     cpdma_handler_fn rx_handler)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct net_device *sl_ndev;
	int i, new_pools, ret;

	ret = cpsw_check_ch_settings(cpsw, chs);
	if (ret < 0)
		return ret;

	cpsw_suspend_data_pass(ndev);

	new_pools = (chs->rx_count != cpsw->rx_ch_num) && cpsw->usage_count;

	ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler);
	if (ret)
		goto err;

	ret = cpsw_update_channels_res(priv, chs->tx_count, 0, rx_handler);
	if (ret)
		goto err;

	for (i = 0; i < cpsw->data.slaves; i++) {
		sl_ndev = cpsw->slaves[i].ndev;
		if (!(sl_ndev && netif_running(sl_ndev)))
			continue;

		/* Inform stack about new count of queues */
		ret = netif_set_real_num_tx_queues(sl_ndev, cpsw->tx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of tx queues\n");
			goto err;
		}

		ret = netif_set_real_num_rx_queues(sl_ndev, cpsw->rx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of rx queues\n");
			goto err;
		}
	}

	cpsw_split_res(cpsw);

	if (new_pools) {
		cpsw_destroy_xdp_rxqs(cpsw);
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret)
			goto err;
	}

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	dev_err(priv->dev, "cannot update channels number, closing device\n");
	cpsw_fail(cpsw);
	return ret;
}

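/*
 * Ring sizing (ethtool -g / ethtool -G eth0 rx N): the descriptor pool is
 * shared between rx and tx, so only the rx/tx split can be moved; see
 * cpsw_set_ringparam(), which honours rx_pending and ignores tx_pending.
 */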
void cpsw_get_ringparam(struct net_device *ndev,
			struct ethtool_ringparam *ering)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	/* tx ring resizing is not supported; report the current split */
	ering->tx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
	ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
	ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
	ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
}

int cpsw_set_ringparam(struct net_device *ndev,
		       struct ethtool_ringparam *ering)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int descs_num, ret;

	/* ignore ering->tx_pending - only rx_pending adjustment is supported */

	if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
	    ering->rx_pending < CPSW_MAX_QUEUES ||
	    ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES))
		return -EINVAL;

	descs_num = cpdma_get_num_rx_descs(cpsw->dma);
	if (ering->rx_pending == descs_num)
		return 0;

	cpsw_suspend_data_pass(ndev);

	ret = cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
	if (ret) {
		if (cpsw_resume_data_pass(ndev))
			goto err;

		return ret;
	}

	if (cpsw->usage_count) {
		cpsw_destroy_xdp_rxqs(cpsw);
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret)
			goto err;
	}

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	cpdma_set_num_rx_descs(cpsw->dma, descs_num);
	dev_err(cpsw->dev, "cannot set ring params, closing device\n");
	cpsw_fail(cpsw);
	return ret;
}

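/*
 * Timestamping capabilities (ethtool -T): with CONFIG_TI_CPTS the CPTS
 * block provides hardware timestamping of PTPv2 event packets; otherwise
 * only software timestamping is reported.
 */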
#if IS_ENABLED(CONFIG_TI_CPTS)
int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = cpsw->cpts->phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
	return 0;
}
#else
int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	info->tx_types = 0;
	info->rx_filters = 0;
	return 0;
}
#endif