1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * (C) Copyright 2012 SAMSUNG Electronics
4 * Jaehoon Chung <jh80.chung@samsung.com>
5 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
6 */
7
8 #include <bouncebuf.h>
9 #include <common.h>
10 #include <cpu_func.h>
11 #include <errno.h>
12 #include <log.h>
13 #include <malloc.h>
14 #include <memalign.h>
15 #include <mmc.h>
16 #include <dwmmc.h>
17 #include <wait_bit.h>
18 #include <asm/cache.h>
19 #include <linux/delay.h>
20 #include <power/regulator.h>
21
/* Bytes of bounce buffer covered by one IDMAC descriptor (8 x 512B blocks). */
#define PAGE_SIZE 4096
23
/*
 * Request a controller reset and poll until the reset bits clear.
 *
 * @host:  controller instance
 * @value: reset bit mask written to DWMCI_CTRL
 *
 * Return: 1 when the reset completed, 0 when the reset bits were
 *	   still set after the polling budget was exhausted.
 */
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long tries = 1000;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (tries--) {
		if (!(dwmci_readl(host, DWMCI_CTRL) & DWMCI_RESET_ALL))
			return 1;
	}

	return 0;
}
38
/*
 * Fill one internal-DMA (IDMAC) descriptor and chain it to its successor.
 *
 * The descriptors live in one contiguous array, so the link pointer is
 * simply the address of the array element that follows @idmac.
 */
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
				u32 desc0, u32 desc1, u32 desc2)
{
	idmac->flags = desc0;
	idmac->cnt = desc1;
	idmac->addr = desc2;
	idmac->next_addr = (ulong)(idmac + 1);
}
49
dwmci_prepare_data(struct dwmci_host * host,struct mmc_data * data,struct dwmci_idmac * cur_idmac,void * bounce_buffer)50 static void dwmci_prepare_data(struct dwmci_host *host,
51 struct mmc_data *data,
52 struct dwmci_idmac *cur_idmac,
53 void *bounce_buffer)
54 {
55 unsigned long ctrl;
56 unsigned int i = 0, flags, cnt, blk_cnt;
57 ulong data_start, data_end;
58
59
60 blk_cnt = data->blocks;
61
62 dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
63
64 /* Clear IDMAC interrupt */
65 dwmci_writel(host, DWMCI_IDSTS, 0xFFFFFFFF);
66
67 data_start = (ulong)cur_idmac;
68 dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);
69
70 do {
71 flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH ;
72 flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
73 if (blk_cnt <= 8) {
74 flags |= DWMCI_IDMAC_LD;
75 cnt = data->blocksize * blk_cnt;
76 } else
77 cnt = data->blocksize * 8;
78
79 dwmci_set_idma_desc(cur_idmac, flags, cnt,
80 (ulong)bounce_buffer + (i * PAGE_SIZE));
81
82 cur_idmac++;
83 if (blk_cnt <= 8)
84 break;
85 blk_cnt -= 8;
86 i++;
87 } while(1);
88
89 data_end = (ulong)cur_idmac;
90 flush_dcache_range(data_start, roundup(data_end, ARCH_DMA_MINALIGN));
91
92 ctrl = dwmci_readl(host, DWMCI_CTRL);
93 ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
94 dwmci_writel(host, DWMCI_CTRL, ctrl);
95
96 ctrl = dwmci_readl(host, DWMCI_BMOD);
97 ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
98 dwmci_writel(host, DWMCI_BMOD, ctrl);
99
100 dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
101 dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
102 }
103
/*
 * Poll the STATUS register until @bit deasserts, leaving the last
 * STATUS value in *@len so the caller can decode the FIFO fill level.
 *
 * Return: 0 on success, -ETIMEDOUT if the bit never clears.
 */
static int dwmci_fifo_ready(struct dwmci_host *host, u32 bit, u32 *len)
{
	u32 budget = 20000;

	for (*len = dwmci_readl(host, DWMCI_STATUS);
	     --budget && (*len & bit);
	     *len = dwmci_readl(host, DWMCI_STATUS))
		udelay(200);

	if (!budget) {
		debug("%s: FIFO underflow timeout\n", __func__);
		return -ETIMEDOUT;
	}

	return 0;
}
121
dwmci_get_timeout(struct mmc * mmc,const unsigned int size)122 static unsigned int dwmci_get_timeout(struct mmc *mmc, const unsigned int size)
123 {
124 unsigned int timeout;
125
126 timeout = size * 8; /* counting in bits */
127 timeout *= 10; /* wait 10 times as long */
128 timeout /= mmc->clock;
129 timeout /= mmc->bus_width;
130 timeout /= mmc->ddr_mode ? 2 : 1;
131 timeout *= 1000; /* counting in msec */
132 timeout = (timeout < 1000) ? 1000 : timeout;
133
134 return timeout;
135 }
136
/*
 * Run the data phase of a transaction.
 *
 * In FIFO (PIO) mode the CPU moves the words through DWMCI_DATA here;
 * in DMA mode the IDMAC moves them and this loop only watches for
 * completion or error interrupts.
 *
 * Return: 0 on success, -EINVAL on a data error reported by the
 *	   controller, -ETIMEDOUT when the transfer stalls.
 */
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	struct mmc *mmc = host->mmc;
	int ret = 0;
	u32 timeout, mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	/* FIFO depth in 32-bit words, reconstructed from the RX watermark. */
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;

	size = data->blocksize * data->blocks;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	timeout = dwmci_get_timeout(mmc, size);

	/* Remaining transfer size in 32-bit words. */
	size /= 4;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		/* PIO path: move words whenever the FIFO signals readiness. */
		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & DWMCI_INTMSK_RXDR)) {
				while (size) {
					/* Wait until the FIFO has data. */
					ret = dwmci_fifo_ready(host,
							DWMCI_FIFO_EMPTY,
							&len);
					if (ret < 0)
						break;

					/* Words currently in the FIFO. */
					len = (len >> DWMCI_FIFO_SHIFT) &
						    DWMCI_FIFO_MASK;
					len = min(size, len);
					for (i = 0; i < len; i++)
						*buf++ =
						dwmci_readl(host, DWMCI_DATA);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_RXDR);
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				while (size) {
					/* Wait until the FIFO has room. */
					ret = dwmci_fifo_ready(host,
							DWMCI_FIFO_FULL,
							&len);
					if (ret < 0)
						break;

					/* Free word slots in the FIFO. */
					len = fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					len = min(size, len);
					for (i = 0; i < len; i++)
						dwmci_writel(host, DWMCI_DATA,
							     *buf++);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
			}
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	/* Acknowledge the interrupts we have handled. */
	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}
229
dwmci_set_transfer_mode(struct dwmci_host * host,struct mmc_data * data)230 static int dwmci_set_transfer_mode(struct dwmci_host *host,
231 struct mmc_data *data)
232 {
233 unsigned long mode;
234
235 mode = DWMCI_CMD_DATA_EXP;
236 if (data->flags & MMC_DATA_WRITE)
237 mode |= DWMCI_CMD_RW;
238
239 return mode;
240 }
241
#ifdef CONFIG_DM_MMC
/*
 * Send one MMC command, optionally with a data phase.
 *
 * The data phase runs either in FIFO (PIO) mode or through the IDMAC
 * with a cache-aligned descriptor chain and a bounce buffer for the
 * caller's (possibly unaligned) data buffer.
 *
 * Return: 0 on success, -ETIMEDOUT on busy/command/response timeout,
 *	   -EIO on response errors, -1 for the unsupported R2+busy
 *	   combination, or the data-phase error code.
 */
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	/* One IDMAC descriptor per 8 blocks (PAGE_SIZE bounce chunk). */
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;	/* ms to wait for the card to unbusy */
	u32 retry = 100000;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	/* Wait for a previous data transfer to release the bus. */
	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	/* Clear all pending interrupt status before issuing the command. */
	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			/* PIO: just program the sizes and reset the FIFO. */
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			/* DMA: stage the user buffer in a bounce buffer. */
			if (data->flags == MMC_DATA_READ) {
				ret = bounce_buffer_start(&bbstate,
						(void*)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				ret = bounce_buffer_start(&bbstate,
						(void*)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}

			if (ret)
				return ret;

			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	/* R2 (136-bit) responses with busy signalling are not supported. */
	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	/* Translate the expected response type into CMD register flags. */
	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n",cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	/* Busy-poll for command-done; with data, RINTSTS is acked later. */
	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	} else if ((cmd->resp_type & MMC_RSP_CRC) &&
		   (mask & DWMCI_INTMSK_RCRC)) {
		debug("%s: Response CRC Error.\n", __func__);
		return -EIO;
	}


	/* Fetch the response; R2 spans RESP0..RESP3 (most significant first). */
	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* only dma mode need it */
		if (!host->fifo_mode) {
			/* Wait for the IDMAC receive/transmit interrupt. */
			if (data->flags == MMC_DATA_READ)
				mask = DWMCI_IDINTEN_RI;
			else
				mask = DWMCI_IDINTEN_TI;
			ret = wait_for_bit_le32(host->ioaddr + DWMCI_IDSTS,
						mask, true, 1000, false);
			if (ret)
				debug("%s: DWMCI_IDINTEN mask 0x%x timeout.\n",
				      __func__, mask);
			/* clear interrupts */
			dwmci_writel(host, DWMCI_IDSTS, DWMCI_IDINTEN_MASK);

			/* Disable DMA and release the bounce buffer. */
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}
400
401 static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
402 {
403 u32 div, status;
404 int timeout = 10000;
405 unsigned long sclk;
406
407 if ((freq == host->clock) || (freq == 0))
408 return 0;
409 /*
410 * If host->get_mmc_clk isn't defined,
411 * then assume that host->bus_hz is source clock value.
412 * host->bus_hz should be set by user.
413 */
414 if (host->get_mmc_clk)
415 sclk = host->get_mmc_clk(host, freq);
416 else if (host->bus_hz)
417 sclk = host->bus_hz;
418 else {
419 debug("%s: Didn't get source clock value.\n", __func__);
420 return -EINVAL;
421 }
422
423 if (sclk == freq)
424 div = 0; /* bypass mode */
425 else
426 div = DIV_ROUND_UP(sclk, 2 * freq);
427
428 dwmci_writel(host, DWMCI_CLKENA, 0);
429 dwmci_writel(host, DWMCI_CLKSRC, 0);
430
431 dwmci_writel(host, DWMCI_CLKDIV, div);
432 dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
433 DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
434
435 do {
436 status = dwmci_readl(host, DWMCI_CMD);
437 if (timeout-- < 0) {
438 debug("%s: Timeout!\n", __func__);
439 return -ETIMEDOUT;
440 }
441 } while (status & DWMCI_CMD_START);
442
443 dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
444 DWMCI_CLKEN_LOW_PWR);
445
446 dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
447 DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
448
449 timeout = 10000;
450 do {
451 status = dwmci_readl(host, DWMCI_CMD);
452 if (timeout-- < 0) {
453 debug("%s: Timeout!\n", __func__);
454 return -ETIMEDOUT;
455 }
456 } while (status & DWMCI_CMD_START);
457
458 host->clock = freq;
459
460 return 0;
461 }
462
463 #ifdef CONFIG_DM_MMC
464 static int dwmci_set_ios(struct udevice *dev)
465 {
466 struct mmc *mmc = mmc_get_mmc_dev(dev);
467 #else
468 static int dwmci_set_ios(struct mmc *mmc)
469 {
470 #endif
471 struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
472 u32 ctype, regs;
473
474 debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);
475
476 dwmci_setup_bus(host, mmc->clock);
477 switch (mmc->bus_width) {
478 case 8:
479 ctype = DWMCI_CTYPE_8BIT;
480 break;
481 case 4:
482 ctype = DWMCI_CTYPE_4BIT;
483 break;
484 default:
485 ctype = DWMCI_CTYPE_1BIT;
486 break;
487 }
488
489 dwmci_writel(host, DWMCI_CTYPE, ctype);
490
491 regs = dwmci_readl(host, DWMCI_UHS_REG);
492 if (mmc->ddr_mode)
493 regs |= DWMCI_DDR_MODE;
494 else
495 regs &= ~DWMCI_DDR_MODE;
496
497 dwmci_writel(host, DWMCI_UHS_REG, regs);
498
499 if (host->clksel) {
500 int ret;
501
502 ret = host->clksel(host);
503 if (ret)
504 return ret;
505 }
506
507 #if CONFIG_IS_ENABLED(DM_REGULATOR)
508 if (mmc->vqmmc_supply) {
509 int ret;
510
511 if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
512 regulator_set_value(mmc->vqmmc_supply, 1800000);
513 else
514 regulator_set_value(mmc->vqmmc_supply, 3300000);
515
516 ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, true);
517 if (ret)
518 return ret;
519 }
520 #endif
521
522 return 0;
523 }
524
525 static int dwmci_init(struct mmc *mmc)
526 {
527 struct dwmci_host *host = mmc->priv;
528
529 if (host->board_init)
530 host->board_init(host);
531
532 dwmci_writel(host, DWMCI_PWREN, 1);
533
534 if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
535 debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
536 return -EIO;
537 }
538
539 /* Enumerate at 400KHz */
540 dwmci_setup_bus(host, mmc->cfg->f_min);
541
542 dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
543 dwmci_writel(host, DWMCI_INTMASK, 0);
544
545 dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);
546
547 dwmci_writel(host, DWMCI_IDINTEN, 0);
548 dwmci_writel(host, DWMCI_BMOD, 1);
549
550 if (!host->fifoth_val) {
551 uint32_t fifo_size;
552
553 fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
554 fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
555 host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
556 TX_WMARK(fifo_size / 2);
557 }
558 dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);
559
560 dwmci_writel(host, DWMCI_CLKENA, 0);
561 dwmci_writel(host, DWMCI_CLKSRC, 0);
562
563 if (!host->fifo_mode)
564 dwmci_writel(host, DWMCI_IDINTEN, DWMCI_IDINTEN_MASK);
565
566 return 0;
567 }
568
#ifdef CONFIG_DM_MMC
/* Driver-model probe: run the common controller init for this device. */
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

/* Driver-model MMC operations (init happens in dwmci_probe()). */
const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
};

#else
/* Legacy (non-DM) MMC operations. */
static const struct mmc_ops dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
	.init = dwmci_init,
};
#endif
589
590 void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
591 u32 max_clk, u32 min_clk)
592 {
593 cfg->name = host->name;
594 #ifndef CONFIG_DM_MMC
595 cfg->ops = &dwmci_ops;
596 #endif
597 cfg->f_min = min_clk;
598 cfg->f_max = max_clk;
599
600 cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
601
602 cfg->host_caps = host->caps;
603
604 if (host->buswidth == 8) {
605 cfg->host_caps |= MMC_MODE_8BIT;
606 cfg->host_caps &= ~MMC_MODE_4BIT;
607 } else {
608 cfg->host_caps |= MMC_MODE_4BIT;
609 cfg->host_caps &= ~MMC_MODE_8BIT;
610 }
611 cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
612
613 cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
614 }
615
#ifdef CONFIG_BLK
/* Driver-model path: bind the MMC device to a block device. */
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
/*
 * Legacy registration: fill host->cfg and register the controller
 * with the MMC core.
 *
 * Return: 0 on success, -1 if mmc_create() fails.
 */
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif
633