// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2011 Michal Simek
 *
 * Michal SIMEK <monstr@monstr.eu>
 *
 * Based on Xilinx gmac driver:
 * (C) Copyright 2011 Xilinx
 */

#include <clk.h>
#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <console.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <watchdog.h>
#include <asm/system.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/errno.h>

/* Bit/mask specification */
#define ZYNQ_GEM_PHYMNTNC_OP_MASK 0x40020000 /* operation mask bits */
#define ZYNQ_GEM_PHYMNTNC_OP_R_MASK 0x20000000 /* read operation */
#define ZYNQ_GEM_PHYMNTNC_OP_W_MASK 0x10000000 /* write operation */
#define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK 23 /* Shift bits for PHYAD */
#define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK 18 /* Shift bits for PHREG */
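/*
 * Rough layout of the PHY maintenance (phymntnc) register as used by
 * phy_setup_op() below (Clause 22 management frame; the field positions
 * follow the shifts/masks above, the exact meaning of the fixed bits is an
 * assumption that should be checked against the GEM TRM):
 *  [31:30] start of frame, [29:28] operation (read/write),
 *  [27:23] PHY address, [22:18] register number,
 *  [17:16] fixed bits included in ZYNQ_GEM_PHYMNTNC_OP_MASK,
 *  [15:0]  data written to, or read back from, the PHY register.
 */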

#define ZYNQ_GEM_RXBUF_EOF_MASK 0x00008000 /* End of frame */
#define ZYNQ_GEM_RXBUF_SOF_MASK 0x00004000 /* Start of frame */
#define ZYNQ_GEM_RXBUF_LEN_MASK 0x00003FFF /* Mask for length field */

#define ZYNQ_GEM_RXBUF_WRAP_MASK 0x00000002 /* Wrap bit, last BD */
#define ZYNQ_GEM_RXBUF_NEW_MASK 0x00000001 /* Used bit */
#define ZYNQ_GEM_RXBUF_ADD_MASK 0xFFFFFFFC /* Mask for address */

/* Wrap bit, last descriptor */
#define ZYNQ_GEM_TXBUF_WRAP_MASK 0x40000000
#define ZYNQ_GEM_TXBUF_LAST_MASK 0x00008000 /* Last buffer */
#define ZYNQ_GEM_TXBUF_USED_MASK 0x80000000 /* Used by HW */

#define ZYNQ_GEM_NWCTRL_TXEN_MASK 0x00000008 /* Enable transmit */
#define ZYNQ_GEM_NWCTRL_RXEN_MASK 0x00000004 /* Enable receive */
#define ZYNQ_GEM_NWCTRL_MDEN_MASK 0x00000010 /* Enable MDIO port */
#define ZYNQ_GEM_NWCTRL_STARTTX_MASK 0x00000200 /* Start tx (tx_go) */

#define ZYNQ_GEM_NWCFG_SPEED100 0x00000001 /* 100 Mbps operation */
#define ZYNQ_GEM_NWCFG_SPEED1000 0x00000400 /* 1 Gbps operation */
#define ZYNQ_GEM_NWCFG_FDEN 0x00000002 /* Full duplex mode */
#define ZYNQ_GEM_NWCFG_FSREM 0x00020000 /* FCS removal */
#define ZYNQ_GEM_NWCFG_SGMII_ENBL 0x08000000 /* SGMII enable */
#define ZYNQ_GEM_NWCFG_PCS_SEL 0x00000800 /* PCS select */
#ifdef CONFIG_ARM64
#define ZYNQ_GEM_NWCFG_MDCCLKDIV 0x00100000 /* Div pclk by 64, max 160MHz */
#else
#define ZYNQ_GEM_NWCFG_MDCCLKDIV 0x000c0000 /* Div pclk by 48, max 120MHz */
#endif

#ifdef CONFIG_ARM64
# define ZYNQ_GEM_DBUS_WIDTH (1 << 21) /* 64 bit bus */
#else
# define ZYNQ_GEM_DBUS_WIDTH (0 << 21) /* 32 bit bus */
#endif

#define ZYNQ_GEM_NWCFG_INIT (ZYNQ_GEM_DBUS_WIDTH | \
                             ZYNQ_GEM_NWCFG_FDEN | \
                             ZYNQ_GEM_NWCFG_FSREM | \
                             ZYNQ_GEM_NWCFG_MDCCLKDIV)

#define ZYNQ_GEM_NWSR_MDIOIDLE_MASK 0x00000004 /* PHY management idle */

#define ZYNQ_GEM_DMACR_BLENGTH 0x00000004 /* INCR4 AHB bursts */
/* Use the full configured addressable space (8 KB) */
#define ZYNQ_GEM_DMACR_RXSIZE 0x00000300
/* Use the full configured addressable space (4 KB) */
#define ZYNQ_GEM_DMACR_TXSIZE 0x00000400
/* Set to binary 00011000 to use 1536-byte (1 * max length frame) buffers */
#define ZYNQ_GEM_DMACR_RXBUF 0x00180000

#if defined(CONFIG_PHYS_64BIT)
# define ZYNQ_GEM_DMA_BUS_WIDTH BIT(30) /* 64 bit bus */
#else
# define ZYNQ_GEM_DMA_BUS_WIDTH (0 << 30) /* 32 bit bus */
#endif

#define ZYNQ_GEM_DMACR_INIT (ZYNQ_GEM_DMACR_BLENGTH | \
                             ZYNQ_GEM_DMACR_RXSIZE | \
                             ZYNQ_GEM_DMACR_TXSIZE | \
                             ZYNQ_GEM_DMACR_RXBUF | \
                             ZYNQ_GEM_DMA_BUS_WIDTH)

#define ZYNQ_GEM_TSR_DONE 0x00000020 /* Tx done mask */

#define ZYNQ_GEM_PCS_CTL_ANEG_ENBL 0x1000

#define ZYNQ_GEM_DCFG_DBG6_DMA_64B BIT(23)

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG 1

/* Mask used to verify certain PHY features (or register contents)
 * in the register above:
 * 0x1000: 10Mbps full duplex support
 * 0x0800: 10Mbps half duplex support
 * 0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK 0x1808

/* TX BD status masks */
#define ZYNQ_GEM_TXBUF_FRMLEN_MASK 0x000007ff
#define ZYNQ_GEM_TXBUF_EXHAUSTED 0x08000000
#define ZYNQ_GEM_TXBUF_UNDERRUN 0x10000000

/* Clock frequencies for different speeds */
#define ZYNQ_GEM_FREQUENCY_10 2500000UL
#define ZYNQ_GEM_FREQUENCY_100 25000000UL
#define ZYNQ_GEM_FREQUENCY_1000 125000000UL
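/*
 * These are the usual (R)GMII/MII TX clock rates for 10/100/1000 Mbit/s
 * links; zynq_gem_init() below requests the matching rate on the "tx_clk"
 * clock via clk_set_rate() once the PHY has reported the negotiated speed.
 */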

#define RXCLK_EN BIT(0)

/* Device registers */
struct zynq_gem_regs {
        u32 nwctrl; /* 0x0 - Network Control reg */
        u32 nwcfg; /* 0x4 - Network Config reg */
        u32 nwsr; /* 0x8 - Network Status reg */
        u32 reserved1;
        u32 dmacr; /* 0x10 - DMA Control reg */
        u32 txsr; /* 0x14 - TX Status reg */
        u32 rxqbase; /* 0x18 - RX Q Base address reg */
        u32 txqbase; /* 0x1c - TX Q Base address reg */
        u32 rxsr; /* 0x20 - RX Status reg */
        u32 reserved2[2];
        u32 idr; /* 0x2c - Interrupt Disable reg */
        u32 reserved3;
        u32 phymntnc; /* 0x34 - PHY Maintenance reg */
        u32 reserved4[18];
        u32 hashl; /* 0x80 - Hash Low address reg */
        u32 hashh; /* 0x84 - Hash High address reg */
#define LADDR_LOW 0
#define LADDR_HIGH 1
        u32 laddr[4][LADDR_HIGH + 1]; /* 0x88 - Specific1 addr low/high reg */
        u32 match[4]; /* 0xa8 - Type ID1 Match reg */
        u32 reserved6[18];
#define STAT_SIZE 44
        u32 stat[STAT_SIZE]; /* 0x100 - Octets transmitted Low reg */
        u32 reserved9[20];
        u32 pcscntrl;
        u32 reserved12[36];
        u32 dcfg6; /* 0x294 - Design config reg6 */
        u32 reserved7[106];
        u32 transmit_q1_ptr; /* 0x440 - Transmit priority queue 1 */
        u32 reserved8[15];
        u32 receive_q1_ptr; /* 0x480 - Receive priority queue 1 */
        u32 reserved10[17];
        u32 upper_txqbase; /* 0x4C8 - Upper tx_q base addr */
        u32 reserved11[2];
        u32 upper_rxqbase; /* 0x4D4 - Upper rx_q base addr */
};

/* BD descriptors */
struct emac_bd {
        u32 addr; /* Buffer address (low 32 bits, plus RX WRAP/NEW bits) */
        u32 status;
#if defined(CONFIG_PHYS_64BIT)
        u32 addr_hi;
        u32 reserved;
#endif
};
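
/*
 * How the two BD words are used in this driver (derived from the masks
 * above): for RX descriptors, 'addr' carries the buffer address in bits
 * [31:2] plus the WRAP and NEW/used bits, while 'status' carries the
 * SOF/EOF flags and the received frame length.  For TX descriptors,
 * 'addr' is just the buffer address and 'status' carries the frame length
 * plus the LAST/WRAP/USED flags.  With CONFIG_PHYS_64BIT the upper 32
 * address bits go into 'addr_hi'.
 */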

/* Reduce the number of buffers if you have a limited amount of memory */
#define RX_BUF 32
/* Page table entries are set to 1MB, or multiples of 1MB (not < 1MB).
 * The driver uses fewer BDs, so a 1MB BD space is enough.
 */
#define BD_SPACE 0x100000
/* BD separation space */
#define BD_SEPRN_SPACE (RX_BUF * sizeof(struct emac_bd))

/* Setup the first free TX descriptor */
#define TX_FREE_DESC 2
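/*
 * TX descriptor usage in this driver: tx_bd[0] describes the frame being
 * sent and tx_bd[1] is the terminating dummy descriptor (see
 * zynq_gem_send()), while tx_bd[TX_FREE_DESC] and tx_bd[TX_FREE_DESC + 2]
 * are reused as dummy descriptors to park the unused priority queue 1
 * (see zynq_gem_init()).
 */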

/* Initialized, rxbd_current, rx_first_buf must be 0 after init */
struct zynq_gem_priv {
        struct emac_bd *tx_bd;
        struct emac_bd *rx_bd;
        char *rxbuffers;
        u32 rxbd_current;
        u32 rx_first_buf;
        int phyaddr;
        int init;
        struct zynq_gem_regs *iobase;
        struct zynq_gem_regs *mdiobase;
        phy_interface_t interface;
        struct phy_device *phydev;
        ofnode phy_of_node;
        struct mii_dev *bus;
        struct clk rx_clk;
        struct clk tx_clk;
        u32 max_speed;
        bool int_pcs;
        bool dma_64bit;
        u32 clk_en_info;
};

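/*
 * Perform one MDIO management cycle through the phymntnc register: wait for
 * the MDIO interface to go idle, program the requested operation, and wait
 * for it to complete.  For reads the result is returned through *data.
 */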
static int phy_setup_op(struct zynq_gem_priv *priv, u32 phy_addr, u32 regnum,
                        u32 op, u16 *data)
{
        u32 mgtcr;
        struct zynq_gem_regs *regs = priv->mdiobase;
        int err;

        err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
                                true, 20000, false);
        if (err)
                return err;

        /* Construct mgtcr mask for the operation */
        mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
                (phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
                (regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;

        /* Write mgtcr and wait for completion */
        writel(mgtcr, &regs->phymntnc);

        err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
                                true, 20000, false);
        if (err)
                return err;

        if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
                *data = readl(&regs->phymntnc);

        return 0;
}

static int phyread(struct zynq_gem_priv *priv, u32 phy_addr,
                   u32 regnum, u16 *val)
{
        int ret;

        ret = phy_setup_op(priv, phy_addr, regnum,
                           ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);

        if (!ret)
                debug("%s: phy_addr %d, regnum 0x%x, val 0x%x\n", __func__,
                      phy_addr, regnum, *val);

        return ret;
}

static int phywrite(struct zynq_gem_priv *priv, u32 phy_addr,
                    u32 regnum, u16 data)
{
        debug("%s: phy_addr %d, regnum 0x%x, data 0x%x\n", __func__, phy_addr,
              regnum, data);

        return phy_setup_op(priv, phy_addr, regnum,
                            ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
}

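/*
 * Program the station MAC address into specific-address register 1.  The
 * other three specific-address filters and the type-ID match registers are
 * cleared so they take no part in address matching.
 */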
static int zynq_gem_setup_mac(struct udevice *dev)
{
        u32 i, macaddrlow, macaddrhigh;
        struct eth_pdata *pdata = dev_get_plat(dev);
        struct zynq_gem_priv *priv = dev_get_priv(dev);
        struct zynq_gem_regs *regs = priv->iobase;

        /* Set the MAC bits [31:0] in BOT */
        macaddrlow = pdata->enetaddr[0];
        macaddrlow |= pdata->enetaddr[1] << 8;
        macaddrlow |= pdata->enetaddr[2] << 16;
        macaddrlow |= pdata->enetaddr[3] << 24;

        /* Set MAC bits [47:32] in TOP */
        macaddrhigh = pdata->enetaddr[4];
        macaddrhigh |= pdata->enetaddr[5] << 8;

        for (i = 0; i < 4; i++) {
                writel(0, &regs->laddr[i][LADDR_LOW]);
                writel(0, &regs->laddr[i][LADDR_HIGH]);
                /* Do not use MATCHx register */
                writel(0, &regs->match[i]);
        }

        writel(macaddrlow, &regs->laddr[0][LADDR_LOW]);
        writel(macaddrhigh, &regs->laddr[0][LADDR_HIGH]);

        return 0;
}

static int zynq_phy_init(struct udevice *dev)
{
        int ret;
        struct zynq_gem_priv *priv = dev_get_priv(dev);
        struct zynq_gem_regs *regs_mdio = priv->mdiobase;
        const u32 supported = SUPPORTED_10baseT_Half |
                              SUPPORTED_10baseT_Full |
                              SUPPORTED_100baseT_Half |
                              SUPPORTED_100baseT_Full |
                              SUPPORTED_1000baseT_Half |
                              SUPPORTED_1000baseT_Full;

        /* Enable only the MDIO bus */
        writel(ZYNQ_GEM_NWCTRL_MDEN_MASK, &regs_mdio->nwctrl);

        priv->phydev = phy_connect(priv->bus, priv->phyaddr, dev,
                                   priv->interface);
        if (!priv->phydev)
                return -ENODEV;

        if (priv->max_speed) {
                ret = phy_set_supported(priv->phydev, priv->max_speed);
                if (ret)
                        return ret;
        }

        priv->phydev->supported &= supported | ADVERTISED_Pause |
                                   ADVERTISED_Asym_Pause;

        priv->phydev->advertising = priv->phydev->supported;
        priv->phydev->node = priv->phy_of_node;

        return phy_config(priv->phydev);
}

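/*
 * Bring the controller up for packet I/O.  On the first call the RX BD ring
 * and the dummy descriptors parking priority queue 1 are set up; on every
 * call the PHY is started, the negotiated speed is programmed into nwcfg,
 * the TX (and optionally RX) clock is configured, and the transmitter and
 * receiver are enabled.
 */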
static int zynq_gem_init(struct udevice *dev)
{
        u32 i, nwconfig;
        int ret;
        unsigned long clk_rate = 0;
        struct zynq_gem_priv *priv = dev_get_priv(dev);
        struct zynq_gem_regs *regs = priv->iobase;
        struct zynq_gem_regs *regs_mdio = priv->mdiobase;
        struct emac_bd *dummy_tx_bd = &priv->tx_bd[TX_FREE_DESC];
        struct emac_bd *dummy_rx_bd = &priv->tx_bd[TX_FREE_DESC + 2];

        if (readl(&regs->dcfg6) & ZYNQ_GEM_DCFG_DBG6_DMA_64B)
                priv->dma_64bit = true;
        else
                priv->dma_64bit = false;

#if defined(CONFIG_PHYS_64BIT)
        if (!priv->dma_64bit) {
                printf("ERR: %s: Using 64-bit DMA but HW doesn't support it\n",
                       __func__);
                return -EINVAL;
        }
#else
        if (priv->dma_64bit)
                debug("WARN: %s: Not using 64-bit DMA even though HW supports it\n",
                      __func__);
#endif

        if (!priv->init) {
                /* Disable all interrupts */
                writel(0xFFFFFFFF, &regs->idr);

                /* Disable the receiver & transmitter */
                writel(0, &regs->nwctrl);
                writel(0, &regs->txsr);
                writel(0, &regs->rxsr);
                writel(0, &regs->phymntnc);

                /* Clear the Hash registers for the MAC address
                 * pointed by AddressPtr
                 */
                writel(0x0, &regs->hashl);
                /* Write bits [63:32] in TOP */
                writel(0x0, &regs->hashh);

                /* Clear all counters */
                for (i = 0; i < STAT_SIZE; i++)
                        readl(&regs->stat[i]);

                /* Setup RxBD space */
                memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));

                for (i = 0; i < RX_BUF; i++) {
                        priv->rx_bd[i].status = 0xF0000000;
                        priv->rx_bd[i].addr =
                                lower_32_bits((ulong)(priv->rxbuffers)
                                              + (i * PKTSIZE_ALIGN));
#if defined(CONFIG_PHYS_64BIT)
                        priv->rx_bd[i].addr_hi =
                                upper_32_bits((ulong)(priv->rxbuffers)
                                              + (i * PKTSIZE_ALIGN));
#endif
                }
                /* WRAP bit to last BD */
                priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
                /* Write RxBDs to IP */
                writel(lower_32_bits((ulong)priv->rx_bd), &regs->rxqbase);
#if defined(CONFIG_PHYS_64BIT)
                writel(upper_32_bits((ulong)priv->rx_bd), &regs->upper_rxqbase);
#endif

                /* Setup for DMA Configuration register */
                writel(ZYNQ_GEM_DMACR_INIT, &regs->dmacr);

                /* Enable the MDIO port in the Network Control register */
                setbits_le32(&regs_mdio->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);

                /* Disable the second priority queue */
                dummy_tx_bd->addr = 0;
#if defined(CONFIG_PHYS_64BIT)
                dummy_tx_bd->addr_hi = 0;
#endif
                dummy_tx_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
                                      ZYNQ_GEM_TXBUF_LAST_MASK |
                                      ZYNQ_GEM_TXBUF_USED_MASK;

                dummy_rx_bd->addr = ZYNQ_GEM_RXBUF_WRAP_MASK |
                                    ZYNQ_GEM_RXBUF_NEW_MASK;
#if defined(CONFIG_PHYS_64BIT)
                dummy_rx_bd->addr_hi = 0;
#endif
                dummy_rx_bd->status = 0;

                writel((ulong)dummy_tx_bd, &regs->transmit_q1_ptr);
                writel((ulong)dummy_rx_bd, &regs->receive_q1_ptr);

                priv->init++;
        }

        ret = phy_startup(priv->phydev);
        if (ret)
                return ret;

        if (!priv->phydev->link) {
                printf("%s: No link.\n", priv->phydev->dev->name);
                return -1;
        }

        nwconfig = ZYNQ_GEM_NWCFG_INIT;

        /*
         * Set the SGMII-enable and PCS-select bits only if the internal
         * PCS/PMA core is used and the interface is SGMII.
         */
        if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
            priv->int_pcs) {
                nwconfig |= ZYNQ_GEM_NWCFG_SGMII_ENBL |
                            ZYNQ_GEM_NWCFG_PCS_SEL;
#ifdef CONFIG_ARM64
                if (priv->phydev->phy_id != PHY_FIXED_ID)
                        writel(readl(&regs->pcscntrl) | ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
                               &regs->pcscntrl);
                else
                        writel(readl(&regs->pcscntrl) & ~ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
                               &regs->pcscntrl);
#endif
        }

        switch (priv->phydev->speed) {
        case SPEED_1000:
                writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED1000,
                       &regs->nwcfg);
                clk_rate = ZYNQ_GEM_FREQUENCY_1000;
                break;
        case SPEED_100:
                writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED100,
                       &regs->nwcfg);
                clk_rate = ZYNQ_GEM_FREQUENCY_100;
                break;
        case SPEED_10:
                clk_rate = ZYNQ_GEM_FREQUENCY_10;
                break;
        }

        ret = clk_set_rate(&priv->tx_clk, clk_rate);
        if (IS_ERR_VALUE(ret)) {
                dev_err(dev, "failed to set tx clock rate\n");
                return ret;
        }

        ret = clk_enable(&priv->tx_clk);
        if (ret) {
                dev_err(dev, "failed to enable tx clock\n");
                return ret;
        }

        if (priv->clk_en_info & RXCLK_EN) {
                ret = clk_enable(&priv->rx_clk);
                if (ret) {
                        dev_err(dev, "failed to enable rx clock\n");
                        return ret;
                }
        }

        setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
                     ZYNQ_GEM_NWCTRL_TXEN_MASK);

        return 0;
}

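/*
 * Queue a single frame for transmission: tx_bd[0] describes the packet and
 * tx_bd[1] terminates the list.  The packet buffer is flushed from the data
 * cache before the transmitter is kicked; the descriptors themselves live in
 * the non-cacheable BD region set up in zynq_gem_probe(), so they need no
 * cache maintenance.
 */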
static int zynq_gem_send(struct udevice *dev, void *ptr, int len)
{
        dma_addr_t addr;
        u32 size;
        struct zynq_gem_priv *priv = dev_get_priv(dev);
        struct zynq_gem_regs *regs = priv->iobase;
        struct emac_bd *current_bd = &priv->tx_bd[1];

        /* Setup Tx BD */
        memset(priv->tx_bd, 0, sizeof(struct emac_bd));

        priv->tx_bd->addr = lower_32_bits((ulong)ptr);
#if defined(CONFIG_PHYS_64BIT)
        priv->tx_bd->addr_hi = upper_32_bits((ulong)ptr);
#endif
        priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
                              ZYNQ_GEM_TXBUF_LAST_MASK;
        /* Dummy descriptor to mark it as the last in descriptor chain */
        current_bd->addr = 0x0;
#if defined(CONFIG_PHYS_64BIT)
        current_bd->addr_hi = 0x0;
#endif
        current_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
                             ZYNQ_GEM_TXBUF_LAST_MASK |
                             ZYNQ_GEM_TXBUF_USED_MASK;

        /* Setup BD */
        writel(lower_32_bits((ulong)priv->tx_bd), &regs->txqbase);
#if defined(CONFIG_PHYS_64BIT)
        writel(upper_32_bits((ulong)priv->tx_bd), &regs->upper_txqbase);
#endif

        addr = (ulong)ptr;
        addr &= ~(ARCH_DMA_MINALIGN - 1);
        size = roundup(len, ARCH_DMA_MINALIGN);
        flush_dcache_range(addr, addr + size);
        barrier();

        /* Start transmit */
        setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);

        /* Read TX BD status */
        if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
                printf("TX buffers exhausted in mid frame\n");

        return wait_for_bit_le32(&regs->txsr, ZYNQ_GEM_TSR_DONE,
                                 true, 20000, true);
}

/* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */
static int zynq_gem_recv(struct udevice *dev, int flags, uchar **packetp)
{
        int frame_len;
        dma_addr_t addr;
        struct zynq_gem_priv *priv = dev_get_priv(dev);
        struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];

        if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK))
                return -1;

        if (!(current_bd->status &
              (ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) {
                printf("GEM: SOF or EOF not set for last buffer received!\n");
                return -1;
        }

        frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK;
        if (!frame_len) {
                printf("%s: Zero size packet?\n", __func__);
                return -1;
        }

#if defined(CONFIG_PHYS_64BIT)
        addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
                            | ((dma_addr_t)current_bd->addr_hi << 32));
#else
        addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
#endif
        addr &= ~(ARCH_DMA_MINALIGN - 1);

        *packetp = (uchar *)(uintptr_t)addr;

        invalidate_dcache_range(addr,
                                addr + roundup(PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
        barrier();

        return frame_len;
}

static int zynq_gem_free_pkt(struct udevice *dev, uchar *packet, int length)
{
        struct zynq_gem_priv *priv = dev_get_priv(dev);
        struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
        struct emac_bd *first_bd;
        dma_addr_t addr;

        if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK) {
                priv->rx_first_buf = priv->rxbd_current;
        } else {
                current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
                current_bd->status = 0xF0000000; /* FIXME */
        }

        if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) {
                first_bd = &priv->rx_bd[priv->rx_first_buf];
                first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
                first_bd->status = 0xF0000000;
        }

        /* Flush the cache for the packet as well */
#if defined(CONFIG_PHYS_64BIT)
        addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
                            | ((dma_addr_t)current_bd->addr_hi << 32));
#else
        addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
#endif
        flush_dcache_range(addr, addr + roundup(PKTSIZE_ALIGN,
                                                ARCH_DMA_MINALIGN));
        barrier();

        if ((++priv->rxbd_current) >= RX_BUF)
                priv->rxbd_current = 0;

        return 0;
}

static void zynq_gem_halt(struct udevice *dev)
{
        struct zynq_gem_priv *priv = dev_get_priv(dev);
        struct zynq_gem_regs *regs = priv->iobase;

        clrsetbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
                        ZYNQ_GEM_NWCTRL_TXEN_MASK, 0);
}

__weak int zynq_board_read_rom_ethaddr(unsigned char *ethaddr)
{
        return -ENOSYS;
}

static int zynq_gem_read_rom_mac(struct udevice *dev)
{
        struct eth_pdata *pdata = dev_get_plat(dev);

        if (!pdata)
                return -ENOSYS;

        return zynq_board_read_rom_ethaddr(pdata->enetaddr);
}

static int zynq_gem_miiphy_read(struct mii_dev *bus, int addr,
                                int devad, int reg)
{
        struct zynq_gem_priv *priv = bus->priv;
        int ret;
        u16 val = 0;

        ret = phyread(priv, addr, reg, &val);
        debug("%s 0x%x, 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val, ret);
        return val;
}

static int zynq_gem_miiphy_write(struct mii_dev *bus, int addr, int devad,
                                 int reg, u16 value)
{
        struct zynq_gem_priv *priv = bus->priv;

        debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, value);
        return phywrite(priv, addr, reg, value);
}

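/*
 * Allocate the driver's DMA memory: the RX packet buffers are ordinary
 * cacheable memory (flushed once here so their initial contents reach RAM),
 * while the BD area is aligned to an MMU section and mapped non-cacheable so
 * descriptors can be shared with the controller without cache maintenance.
 * The MDIO bus is then registered and the PHY probed.
 */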
static int zynq_gem_probe(struct udevice *dev)
{
        void *bd_space;
        struct zynq_gem_priv *priv = dev_get_priv(dev);
        int ret;

        /* Align rxbuffers to ARCH_DMA_MINALIGN */
        priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN);
        if (!priv->rxbuffers)
                return -ENOMEM;

        memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN);
        ulong addr = (ulong)priv->rxbuffers;
        flush_dcache_range(addr,
                           addr + roundup(RX_BUF * PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
        barrier();

        /* Align bd_space to MMU_SECTION_SHIFT */
        bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
        if (!bd_space) {
                ret = -ENOMEM;
                goto err1;
        }

        mmu_set_region_dcache_behaviour((phys_addr_t)bd_space,
                                        BD_SPACE, DCACHE_OFF);

        /* Initialize the BD spaces for TX and RX BDs */
        priv->tx_bd = (struct emac_bd *)bd_space;
        priv->rx_bd = (struct emac_bd *)((ulong)bd_space + BD_SEPRN_SPACE);

        ret = clk_get_by_name(dev, "tx_clk", &priv->tx_clk);
        if (ret < 0) {
                dev_err(dev, "failed to get tx_clock\n");
                goto err2;
        }

        if (priv->clk_en_info & RXCLK_EN) {
                ret = clk_get_by_name(dev, "rx_clk", &priv->rx_clk);
                if (ret < 0) {
                        dev_err(dev, "failed to get rx_clock\n");
                        goto err2;
                }
        }

        priv->bus = mdio_alloc();
        if (!priv->bus) {
                ret = -ENOMEM;
                goto err2;
        }
        priv->bus->read = zynq_gem_miiphy_read;
        priv->bus->write = zynq_gem_miiphy_write;
        priv->bus->priv = priv;

        ret = mdio_register_seq(priv->bus, dev_seq(dev));
        if (ret)
                goto err2;

        ret = zynq_phy_init(dev);
        if (ret)
                goto err3;

        return ret;

err3:
        mdio_unregister(priv->bus);
err2:
        free(priv->tx_bd);
err1:
        free(priv->rxbuffers);
        return ret;
}

static int zynq_gem_remove(struct udevice *dev)
{
        struct zynq_gem_priv *priv = dev_get_priv(dev);

        free(priv->phydev);
        mdio_unregister(priv->bus);
        mdio_free(priv->bus);

        return 0;
}

static const struct eth_ops zynq_gem_ops = {
        .start = zynq_gem_init,
        .send = zynq_gem_send,
        .recv = zynq_gem_recv,
        .free_pkt = zynq_gem_free_pkt,
        .stop = zynq_gem_halt,
        .write_hwaddr = zynq_gem_setup_mac,
        .read_rom_hwaddr = zynq_gem_read_rom_mac,
};

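/*
 * Parse the device tree: the base (and optional separate MDIO) register
 * block, PHY address/node and max-speed from the phy-handle target, the
 * phy-mode string, the "is-internal-pcspma" flag, and the per-compatible
 * driver data used for the optional RX clock enable.
 */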
static int zynq_gem_of_to_plat(struct udevice *dev)
{
        struct eth_pdata *pdata = dev_get_plat(dev);
        struct zynq_gem_priv *priv = dev_get_priv(dev);
        struct ofnode_phandle_args phandle_args;
        const char *phy_mode;

        pdata->iobase = (phys_addr_t)dev_read_addr(dev);
        priv->iobase = (struct zynq_gem_regs *)pdata->iobase;
        priv->mdiobase = priv->iobase;
        /* Hardcode for now */
        priv->phyaddr = -1;

        if (!dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
                                        &phandle_args)) {
                fdt_addr_t addr;
                ofnode parent;

                debug("phy-handle exists for %s\n", dev->name);
                priv->phyaddr = ofnode_read_u32_default(phandle_args.node,
                                                        "reg", -1);
                priv->phy_of_node = phandle_args.node;
                priv->max_speed = ofnode_read_u32_default(phandle_args.node,
                                                          "max-speed",
                                                          SPEED_1000);

                parent = ofnode_get_parent(phandle_args.node);
                addr = ofnode_get_addr(parent);
                if (addr != FDT_ADDR_T_NONE) {
                        debug("Separate MDIO bus found for %s\n", dev->name);
                        priv->mdiobase = (struct zynq_gem_regs *)addr;
                }
        }

        phy_mode = dev_read_prop(dev, "phy-mode", NULL);
        if (phy_mode)
                pdata->phy_interface = phy_get_interface_by_name(phy_mode);
        if (pdata->phy_interface == -1) {
                debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
                return -EINVAL;
        }
        priv->interface = pdata->phy_interface;

        priv->int_pcs = dev_read_bool(dev, "is-internal-pcspma");

        printf("\nZYNQ GEM: %lx, mdio bus %lx, phyaddr %d, interface %s\n",
               (ulong)priv->iobase, (ulong)priv->mdiobase, priv->phyaddr,
               phy_string_for_interface(priv->interface));

        priv->clk_en_info = dev_get_driver_data(dev);

        return 0;
}

static const struct udevice_id zynq_gem_ids[] = {
        { .compatible = "cdns,versal-gem", .data = RXCLK_EN },
        { .compatible = "cdns,zynqmp-gem" },
        { .compatible = "cdns,zynq-gem" },
        { .compatible = "cdns,gem" },
        { }
};

U_BOOT_DRIVER(zynq_gem) = {
        .name = "zynq_gem",
        .id = UCLASS_ETH,
        .of_match = zynq_gem_ids,
        .of_to_plat = zynq_gem_of_to_plat,
        .probe = zynq_gem_probe,
        .remove = zynq_gem_remove,
        .ops = &zynq_gem_ops,
        .priv_auto = sizeof(struct zynq_gem_priv),
        .plat_auto = sizeof(struct eth_pdata),
};