1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2008, Freescale Semiconductor, Inc
4  * Copyright 2020 NXP
5  * Andy Fleming
6  *
7  * Based vaguely on the Linux code
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <blk.h>
13 #include <command.h>
14 #include <dm.h>
15 #include <log.h>
16 #include <dm/device-internal.h>
17 #include <errno.h>
18 #include <mmc.h>
19 #include <part.h>
20 #include <linux/bitops.h>
21 #include <linux/delay.h>
22 #include <power/regulator.h>
23 #include <malloc.h>
24 #include <memalign.h>
25 #include <linux/list.h>
26 #include <div64.h>
27 #include "mmc_private.h"
28 
29 #define DEFAULT_CMD6_TIMEOUT_MS  500
30 
31 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
32 
33 #if !CONFIG_IS_ENABLED(DM_MMC)
34 
/*
 * mmc_wait_dat0() - non-DM stub for controller DAT0 polling
 *
 * Always reports -ENOSYS so callers fall back to CMD13 status polling
 * or fixed delays instead of hardware busy detection.
 */
static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
{
	return -ENOSYS;
}
39 
/*
 * board_mmc_getwp() - weak board hook for the write-protect state
 *
 * Boards may override this; the default -1 means "not implemented",
 * which makes mmc_getwp() fall back to the controller op.
 */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}
44 
mmc_getwp(struct mmc * mmc)45 int mmc_getwp(struct mmc *mmc)
46 {
47 	int wp;
48 
49 	wp = board_mmc_getwp(mmc);
50 
51 	if (wp < 0) {
52 		if (mmc->cfg->ops->getwp)
53 			wp = mmc->cfg->ops->getwp(mmc);
54 		else
55 			wp = 0;
56 	}
57 
58 	return wp;
59 }
60 
/*
 * board_mmc_getcd() - weak board hook for the card-detect state
 *
 * Boards may override this; the default -1 means "not implemented".
 */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
65 #endif
66 
67 #ifdef CONFIG_MMC_TRACE
/* Log the command index and argument just before it goes on the bus. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
}
73 
/*
 * mmmc_trace_after_send() - log a command's completion
 *
 * On failure, print the error code; otherwise decode the response
 * words according to the response type. R2 (136-bit) responses are
 * additionally hex-dumped byte by byte, most-significant byte first.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08x \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08x \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08x \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			/* walk each response word from its top byte down */
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02x ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}
125 
mmc_trace_state(struct mmc * mmc,struct mmc_cmd * cmd)126 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
127 {
128 	int status;
129 
130 	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
131 	printf("CURR STATE:%d\n", status);
132 }
133 #endif
134 
135 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
mmc_mode_name(enum bus_mode mode)136 const char *mmc_mode_name(enum bus_mode mode)
137 {
138 	static const char *const names[] = {
139 	      [MMC_LEGACY]	= "MMC legacy",
140 	      [MMC_HS]		= "MMC High Speed (26MHz)",
141 	      [SD_HS]		= "SD High Speed (50MHz)",
142 	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
143 	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
144 	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
145 	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
146 	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
147 	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
148 	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
149 	      [MMC_HS_200]	= "HS200 (200MHz)",
150 	      [MMC_HS_400]	= "HS400 (200MHz)",
151 	      [MMC_HS_400_ES]	= "HS400ES (200MHz)",
152 	};
153 
154 	if (mode >= MMC_MODES_END)
155 		return "Unknown mode";
156 	else
157 		return names[mode];
158 }
159 #endif
160 
mmc_mode2freq(struct mmc * mmc,enum bus_mode mode)161 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162 {
163 	static const int freqs[] = {
164 	      [MMC_LEGACY]	= 25000000,
165 	      [MMC_HS]		= 26000000,
166 	      [SD_HS]		= 50000000,
167 	      [MMC_HS_52]	= 52000000,
168 	      [MMC_DDR_52]	= 52000000,
169 	      [UHS_SDR12]	= 25000000,
170 	      [UHS_SDR25]	= 50000000,
171 	      [UHS_SDR50]	= 100000000,
172 	      [UHS_DDR50]	= 50000000,
173 	      [UHS_SDR104]	= 208000000,
174 	      [MMC_HS_200]	= 200000000,
175 	      [MMC_HS_400]	= 200000000,
176 	      [MMC_HS_400_ES]	= 200000000,
177 	};
178 
179 	if (mode == MMC_LEGACY)
180 		return mmc->legacy_speed;
181 	else if (mode >= MMC_MODES_END)
182 		return 0;
183 	else
184 		return freqs[mode];
185 }
186 
mmc_select_mode(struct mmc * mmc,enum bus_mode mode)187 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
188 {
189 	mmc->selected_mode = mode;
190 	mmc->tran_speed = mmc_mode2freq(mmc, mode);
191 	mmc->ddr_mode = mmc_is_mode_ddr(mode);
192 	pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
193 		 mmc->tran_speed / 1000000);
194 	return 0;
195 }
196 
197 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_send_cmd() - issue one command via the host controller ops
 *
 * Non-DM path: wraps the controller's send_cmd op with optional
 * trace logging before and after the transfer.
 */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
208 #endif
209 
210 /**
211  * mmc_send_cmd_retry() - send a command to the mmc device, retrying on error
212  *
213  * @dev:	device to receive the command
214  * @cmd:	command to send
215  * @data:	additional data to send/receive
216  * @retries:	how many times to retry; mmc_send_cmd is always called at least
217  *              once
218  * @return 0 if ok, -ve on error
219  */
static int mmc_send_cmd_retry(struct mmc *mmc, struct mmc_cmd *cmd,
			      struct mmc_data *data, uint retries)
{
	int err;

	/* one initial attempt plus up to 'retries' repeats on error */
	for (;;) {
		err = mmc_send_cmd(mmc, cmd, data);
		if (!err || !retries--)
			break;
	}

	return err;
}
231 
232 /**
233  * mmc_send_cmd_quirks() - send a command to the mmc device, retrying if a
234  *                         specific quirk is enabled
235  *
236  * @dev:	device to receive the command
237  * @cmd:	command to send
238  * @data:	additional data to send/receive
239  * @quirk:	retry only if this quirk is enabled
240  * @retries:	how many times to retry; mmc_send_cmd is always called at least
241  *              once
242  * @return 0 if ok, -ve on error
243  */
static int mmc_send_cmd_quirks(struct mmc *mmc, struct mmc_cmd *cmd,
			       struct mmc_data *data, u32 quirk, uint retries)
{
	/* retry only when quirk handling is built in and enabled for card */
	if (CONFIG_IS_ENABLED(MMC_QUIRKS) && (mmc->quirks & quirk))
		return mmc_send_cmd_retry(mmc, cmd, data, retries);

	return mmc_send_cmd(mmc, cmd, data);
}
252 
mmc_send_status(struct mmc * mmc,unsigned int * status)253 int mmc_send_status(struct mmc *mmc, unsigned int *status)
254 {
255 	struct mmc_cmd cmd;
256 	int ret;
257 
258 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
259 	cmd.resp_type = MMC_RSP_R1;
260 	if (!mmc_host_is_spi(mmc))
261 		cmd.cmdarg = mmc->rca << 16;
262 
263 	ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 4);
264 	mmc_trace_state(mmc, &cmd);
265 	if (!ret)
266 		*status = cmd.response[0];
267 
268 	return ret;
269 }
270 
mmc_poll_for_busy(struct mmc * mmc,int timeout_ms)271 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
272 {
273 	unsigned int status;
274 	int err;
275 
276 	err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
277 	if (err != -ENOSYS)
278 		return err;
279 
280 	while (1) {
281 		err = mmc_send_status(mmc, &status);
282 		if (err)
283 			return err;
284 
285 		if ((status & MMC_STATUS_RDY_FOR_DATA) &&
286 		    (status & MMC_STATUS_CURR_STATE) !=
287 		     MMC_STATE_PRG)
288 			break;
289 
290 		if (status & MMC_STATUS_MASK) {
291 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
292 			pr_err("Status Error: 0x%08x\n", status);
293 #endif
294 			return -ECOMM;
295 		}
296 
297 		if (timeout_ms-- <= 0)
298 			break;
299 
300 		udelay(1000);
301 	}
302 
303 	if (timeout_ms <= 0) {
304 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
305 		pr_err("Timeout waiting card ready\n");
306 #endif
307 		return -ETIMEDOUT;
308 	}
309 
310 	return 0;
311 }
312 
mmc_set_blocklen(struct mmc * mmc,int len)313 int mmc_set_blocklen(struct mmc *mmc, int len)
314 {
315 	struct mmc_cmd cmd;
316 
317 	if (mmc->ddr_mode)
318 		return 0;
319 
320 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
321 	cmd.resp_type = MMC_RSP_R1;
322 	cmd.cmdarg = len;
323 
324 	return mmc_send_cmd_quirks(mmc, &cmd, NULL,
325 				   MMC_QUIRK_RETRY_SET_BLOCKLEN, 4);
326 }
327 
328 #ifdef MMC_SUPPORTS_TUNING
/*
 * Reference tuning block patterns for the 4-bit and 8-bit bus widths.
 * The data read back from the card during the tuning command is
 * compared against these in mmc_send_tuning() below.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
358 
/*
 * mmc_send_tuning() - execute one tuning command and verify the data
 *
 * Sends @opcode as a single-block read and compares the received
 * block against the reference pattern for the current bus width
 * (8-bit or 4-bit; other widths are rejected with -EINVAL).
 *
 * Returns 0 on a matching pattern, -EIO on mismatch, or the transfer
 * error. @cmd_error is currently unused here.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err;

	if (mmc->bus_width == 8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == 4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	/* cache-aligned receive buffer sized to the pattern */
	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = size;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	if (memcmp(data_buf, tuning_block_pattern, size))
		return -EIO;

	return 0;
}
396 #endif
397 
/*
 * mmc_read_blocks() - read @blkcnt blocks starting at block @start
 *
 * Uses CMD18 (multi-block, followed by CMD12 stop) or CMD17 (single
 * block). High-capacity cards are block-addressed; older cards take
 * a byte offset.
 *
 * Returns the number of blocks read, or 0 on any failure (the caller
 * treats a short count as an error).
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	/* byte addressing on non-high-capacity cards */
	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	/* multi-block reads must be terminated with CMD12 */
	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
438 
439 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_get_b_max() - largest block count for one transfer
 *
 * Asks the controller's get_b_max op when provided; otherwise the
 * static per-config limit applies.
 */
static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
{
	if (!mmc->cfg->ops->get_b_max)
		return mmc->cfg->b_max;

	return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
}
447 #endif
448 
#if CONFIG_IS_ENABLED(BLK)
/*
 * mmc_bread() - block-layer read entry point
 *
 * Selects the requested hardware partition, validates the range
 * against the partition size, programs the block length and reads in
 * chunks no larger than the controller's b_max limit.
 *
 * Returns the number of blocks read, or 0 on any failure.
 */
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_plat(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;
	uint b_max;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* MMC_TINY builds switch hw partitions directly, without blk_* */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		pr_debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	b_max = mmc_get_b_max(mmc, dst, blkcnt);

	/* read in b_max-sized chunks until everything is transferred */
	do {
		cur = (blocks_todo > b_max) ? b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			pr_debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
507 
mmc_go_idle(struct mmc * mmc)508 static int mmc_go_idle(struct mmc *mmc)
509 {
510 	struct mmc_cmd cmd;
511 	int err;
512 
513 	udelay(1000);
514 
515 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
516 	cmd.cmdarg = 0;
517 	cmd.resp_type = MMC_RSP_NONE;
518 
519 	err = mmc_send_cmd(mmc, &cmd, NULL);
520 
521 	if (err)
522 		return err;
523 
524 	udelay(2000);
525 
526 	return 0;
527 }
528 
529 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * mmc_switch_voltage() - perform the CMD11 signal-voltage switch
 *
 * For a 3.3V request no CMD11 is needed; the host regulator is set
 * directly. For 1.8V the full sequence runs: CMD11, wait for the card
 * to drive DAT0 low, gate the clock, change the host signal voltage,
 * re-enable the clock and wait for the card to release DAT0.
 *
 * Returns 0 on success, -EIO if the card rejected CMD11, -ETIMEDOUT
 * if DAT0 never reached the expected level, or a lower-level error.
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
589 #endif
590 
/*
 * sd_send_op_cond() - initialize an SD card with ACMD41
 *
 * Repeats CMD55+ACMD41 until the card reports it is no longer busy,
 * advertising the host voltage window, high-capacity support (for
 * SD v2 cards) and, when @uhs_en is set, the 1.8V switch request.
 * On success the OCR, high-capacity flag and RCA are recorded, and
 * the voltage switch is performed if the card accepted it.
 *
 * Returns 0 on success, -EOPNOTSUPP if the card never left busy,
 * or a command error.
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* card is ready and accepted the S18R request: switch to 1.8V */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
670 
/*
 * mmc_send_op_cond_iter() - issue one CMD1 (SEND_OP_COND) iteration
 *
 * The first probe (@use_arg == 0) sends a zero argument to read the
 * card's capabilities; subsequent iterations (outside SPI mode) feed
 * back the negotiated voltage window and access mode from the
 * previously read OCR plus OCR_HCS. Updates mmc->ocr on success.
 */
static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
	cmd.resp_type = MMC_RSP_R3;
	cmd.cmdarg = 0;
	if (use_arg && !mmc_host_is_spi(mmc))
		cmd.cmdarg = OCR_HCS |
			(mmc->cfg->voltages &
			(mmc->ocr & OCR_VOLTAGE_MASK)) |
			(mmc->ocr & OCR_ACCESS_MODE);

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;
	mmc->ocr = cmd.response[0];
	return 0;
}
691 
mmc_send_op_cond(struct mmc * mmc)692 static int mmc_send_op_cond(struct mmc *mmc)
693 {
694 	int err, i;
695 	int timeout = 1000;
696 	uint start;
697 
698 	/* Some cards seem to need this */
699 	mmc_go_idle(mmc);
700 
701 	start = get_timer(0);
702  	/* Asking to the card its capabilities */
703 	for (i = 0; ; i++) {
704 		err = mmc_send_op_cond_iter(mmc, i != 0);
705 		if (err)
706 			return err;
707 
708 		/* exit if not busy (flag seems to be inverted) */
709 		if (mmc->ocr & OCR_BUSY)
710 			break;
711 
712 		if (get_timer(start) > timeout)
713 			return -ETIMEDOUT;
714 		udelay(100);
715 	}
716 	mmc->op_cond_pending = 1;
717 	return 0;
718 }
719 
/*
 * mmc_complete_op_cond() - finish the CMD1 handshake started earlier
 *
 * If the card was still busy when mmc_send_op_cond() returned, resets
 * it and repeats CMD1 until not-busy or timeout. In SPI mode the OCR
 * is then read explicitly. On success the version is reset to
 * "unknown" (probed later), the high-capacity flag is derived from
 * the OCR and the default RCA of 1 is assigned.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	ulong start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
765 
766 
/*
 * mmc_send_ext_csd() - read the 512-byte Extended CSD with CMD8
 *
 * @ext_csd must point to a buffer of at least MMC_MAX_BLOCK_LEN bytes.
 */
int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
{
	struct mmc_cmd cmd = {
		.cmdidx = MMC_CMD_SEND_EXT_CSD,
		.resp_type = MMC_RSP_R1,
		.cmdarg = 0,
	};
	struct mmc_data data = {
		.dest = (char *)ext_csd,
		.blocks = 1,
		.blocksize = MMC_MAX_BLOCK_LEN,
		.flags = MMC_DATA_READ,
	};

	return mmc_send_cmd(mmc, &cmd, &data);
}
787 
/*
 * __mmc_switch() - write one EXT_CSD byte with CMD6 and wait for it
 *
 * The CMD6 timeout comes from GENERIC_CMD6_TIME (or PARTITION_SWITCH_TIME
 * for partition switches), falling back to DEFAULT_CMD6_TIMEOUT_MS.
 * Completion is detected via DAT0 when the controller supports it;
 * otherwise either a plain delay (@send_status false) or CMD13 polling
 * is used, with MMC_STATUS_SWITCH_ERROR reported as -EIO.
 *
 * Fix: the timer base was held in 'unsigned int', truncating
 * get_timer()'s ulong return on 64-bit targets and corrupting the
 * elapsed-time computation; use ulong for the base.
 */
static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
			bool send_status)
{
	unsigned int status;
	ulong start;
	struct mmc_cmd cmd;
	int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
	bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
			      (index == EXT_CSD_PART_CONF);
	int ret;

	if (mmc->gen_cmd6_time)
		timeout_ms = mmc->gen_cmd6_time * 10;

	if (is_part_switch  && mmc->part_switch_time)
		timeout_ms = mmc->part_switch_time * 10;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
				 (index << 16) |
				 (value << 8);

	ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 3);
	if (ret)
		return ret;

	start = get_timer(0);

	/* poll dat0 for rdy/busy status */
	ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
	if (ret && ret != -ENOSYS)
		return ret;

	/*
	 * In cases when not allowed to poll by using CMD13 or because we aren't
	 * capable of polling by using mmc_wait_dat0, then rely on waiting the
	 * stated timeout to be sufficient.
	 */
	if (ret == -ENOSYS && !send_status) {
		mdelay(timeout_ms);
		return 0;
	}

	/* Finally wait until the card is ready or indicates a failure
	 * to switch. It doesn't hurt to use CMD13 here even if send_status
	 * is false, because by now (after 'timeout_ms' ms) the bus should be
	 * reliable.
	 */
	do {
		ret = mmc_send_status(mmc, &status);

		if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
			pr_debug("switch failed %d/%d/0x%x !\n", set, index,
				 value);
			return -EIO;
		}
		if (!ret && (status & MMC_STATUS_RDY_FOR_DATA) &&
		    (status & MMC_STATUS_CURR_STATE) == MMC_STATE_TRANS)
			return 0;
		udelay(100);
	} while (get_timer(start) < timeout_ms);

	return -ETIMEDOUT;
}
852 
/* Write one EXT_CSD byte via CMD6, polling the card with CMD13 allowed. */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}
857 
/*
 * mmc_boot_wp() - enable write protection for the boot area
 *
 * Writes 1 to EXT_CSD_BOOT_WP (exact WP mode per the JEDEC EXT_CSD
 * field definition).
 */
int mmc_boot_wp(struct mmc *mmc)
{
	return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
}
862 
863 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * mmc_set_card_speed() - program the card's HS_TIMING for a bus mode
 *
 * Maps @mode to the corresponding EXT_CSD_TIMING value and writes it
 * with CMD6. When @hsdowngrade is set (leaving HS200/HS400 for HS),
 * CMD13 polling is suppressed during the switch and the host clock is
 * immediately dropped to HS so the following EXT_CSD read is reliable.
 *
 * For plain HS/HS_52 the switch is verified by re-reading EXT_CSD.
 * Returns 0 on success, -EINVAL for an unsupported mode, -ENOTSUPP
 * if the card did not take the high-speed timing, or a transfer error.
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
			      bool hsdowngrade)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	case MMC_HS_400:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	case MMC_HS_400_ES:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}

	err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   speed_bits, !hsdowngrade);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	/*
	 * In case the eMMC is in HS200/HS400 mode and we are downgrading
	 * to HS mode, the card clock are still running much faster than
	 * the supported HS mode clock, so we can not reliably read out
	 * Extended CSD. Reconfigure the controller to run at HS mode.
	 */
	if (hsdowngrade) {
		mmc_select_mode(mmc, MMC_HS);
		mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
	}
#endif

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}
932 
/*
 * mmc_get_capabilities() - derive card_caps from the cached EXT_CSD
 *
 * Starts from the baseline (1-bit, legacy) and adds bus-width and
 * speed-mode capabilities based on EXT_CSD_CARD_TYPE. SPI hosts and
 * pre-v4 cards keep the baseline only. Requires mmc->ext_csd to have
 * been read already.
 */
static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		pr_err("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	cardtype = ext_csd[EXT_CSD_CARD_TYPE];
	mmc->cardtype = cardtype;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
	CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
			EXT_CSD_CARD_TYPE_HS400_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS400;
	}
#endif
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	/* enhanced strobe requires HS400 support as well */
	if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (mmc->card_caps & MMC_MODE_HS400)) {
		mmc->card_caps |= MMC_MODE_HS400_ES;
	}
#endif

	return 0;
}
987 #endif
988 
mmc_set_capacity(struct mmc * mmc,int part_num)989 static int mmc_set_capacity(struct mmc *mmc, int part_num)
990 {
991 	switch (part_num) {
992 	case 0:
993 		mmc->capacity = mmc->capacity_user;
994 		break;
995 	case 1:
996 	case 2:
997 		mmc->capacity = mmc->capacity_boot;
998 		break;
999 	case 3:
1000 		mmc->capacity = mmc->capacity_rpmb;
1001 		break;
1002 	case 4:
1003 	case 5:
1004 	case 6:
1005 	case 7:
1006 		mmc->capacity = mmc->capacity_gp[part_num - 4];
1007 		break;
1008 	default:
1009 		return -1;
1010 	}
1011 
1012 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1013 
1014 	return 0;
1015 }
1016 
/*
 * mmc_switch_part() - switch the active hardware partition
 *
 * Rewrites the PARTITION_ACCESS bits of EXT_CSD_PART_CONF, retrying
 * the CMD6 up to three extra times. On success (or when returning to
 * the raw device after an -ENODEV failure) the cached capacity and
 * block-descriptor hwpart are updated to match.
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;
	int retry = 3;

	do {
		ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONF,
				 (mmc->part_config & ~PART_ACCESS_MASK)
				 | (part_num & PART_ACCESS_MASK));
	} while (ret && retry--);

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}
1040 
1041 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
mmc_hwpart_config(struct mmc * mmc,const struct mmc_hwpart_conf * conf,enum mmc_hwpart_conf_mode mode)1042 int mmc_hwpart_config(struct mmc *mmc,
1043 		      const struct mmc_hwpart_conf *conf,
1044 		      enum mmc_hwpart_conf_mode mode)
1045 {
1046 	u8 part_attrs = 0;
1047 	u32 enh_size_mult;
1048 	u32 enh_start_addr;
1049 	u32 gp_size_mult[4];
1050 	u32 max_enh_size_mult;
1051 	u32 tot_enh_size_mult = 0;
1052 	u8 wr_rel_set;
1053 	int i, pidx, err;
1054 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1055 
1056 	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1057 		return -EINVAL;
1058 
1059 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1060 		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1061 		return -EMEDIUMTYPE;
1062 	}
1063 
1064 	if (!(mmc->part_support & PART_SUPPORT)) {
1065 		pr_err("Card does not support partitioning\n");
1066 		return -EMEDIUMTYPE;
1067 	}
1068 
1069 	if (!mmc->hc_wp_grp_size) {
1070 		pr_err("Card does not define HC WP group size\n");
1071 		return -EMEDIUMTYPE;
1072 	}
1073 
1074 	/* check partition alignment and total enhanced size */
1075 	if (conf->user.enh_size) {
1076 		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1077 		    conf->user.enh_start % mmc->hc_wp_grp_size) {
1078 			pr_err("User data enhanced area not HC WP group "
1079 			       "size aligned\n");
1080 			return -EINVAL;
1081 		}
1082 		part_attrs |= EXT_CSD_ENH_USR;
1083 		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1084 		if (mmc->high_capacity) {
1085 			enh_start_addr = conf->user.enh_start;
1086 		} else {
1087 			enh_start_addr = (conf->user.enh_start << 9);
1088 		}
1089 	} else {
1090 		enh_size_mult = 0;
1091 		enh_start_addr = 0;
1092 	}
1093 	tot_enh_size_mult += enh_size_mult;
1094 
1095 	for (pidx = 0; pidx < 4; pidx++) {
1096 		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1097 			pr_err("GP%i partition not HC WP group size "
1098 			       "aligned\n", pidx+1);
1099 			return -EINVAL;
1100 		}
1101 		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1102 		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1103 			part_attrs |= EXT_CSD_ENH_GP(pidx);
1104 			tot_enh_size_mult += gp_size_mult[pidx];
1105 		}
1106 	}
1107 
1108 	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1109 		pr_err("Card does not support enhanced attribute\n");
1110 		return -EMEDIUMTYPE;
1111 	}
1112 
1113 	err = mmc_send_ext_csd(mmc, ext_csd);
1114 	if (err)
1115 		return err;
1116 
1117 	max_enh_size_mult =
1118 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1119 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1120 		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1121 	if (tot_enh_size_mult > max_enh_size_mult) {
1122 		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1123 		       tot_enh_size_mult, max_enh_size_mult);
1124 		return -EMEDIUMTYPE;
1125 	}
1126 
1127 	/* The default value of EXT_CSD_WR_REL_SET is device
1128 	 * dependent, the values can only be changed if the
1129 	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1130 	 * changed only once and before partitioning is completed. */
1131 	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1132 	if (conf->user.wr_rel_change) {
1133 		if (conf->user.wr_rel_set)
1134 			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1135 		else
1136 			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1137 	}
1138 	for (pidx = 0; pidx < 4; pidx++) {
1139 		if (conf->gp_part[pidx].wr_rel_change) {
1140 			if (conf->gp_part[pidx].wr_rel_set)
1141 				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1142 			else
1143 				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1144 		}
1145 	}
1146 
1147 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1148 	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1149 		puts("Card does not support host controlled partition write "
1150 		     "reliability settings\n");
1151 		return -EMEDIUMTYPE;
1152 	}
1153 
1154 	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1155 	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
1156 		pr_err("Card already partitioned\n");
1157 		return -EPERM;
1158 	}
1159 
1160 	if (mode == MMC_HWPART_CONF_CHECK)
1161 		return 0;
1162 
1163 	/* Partitioning requires high-capacity size definitions */
1164 	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1165 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1166 				 EXT_CSD_ERASE_GROUP_DEF, 1);
1167 
1168 		if (err)
1169 			return err;
1170 
1171 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1172 
1173 #if CONFIG_IS_ENABLED(MMC_WRITE)
1174 		/* update erase group size to be high-capacity */
1175 		mmc->erase_grp_size =
1176 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1177 #endif
1178 
1179 	}
1180 
1181 	/* all OK, write the configuration */
1182 	for (i = 0; i < 4; i++) {
1183 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1184 				 EXT_CSD_ENH_START_ADDR+i,
1185 				 (enh_start_addr >> (i*8)) & 0xFF);
1186 		if (err)
1187 			return err;
1188 	}
1189 	for (i = 0; i < 3; i++) {
1190 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1191 				 EXT_CSD_ENH_SIZE_MULT+i,
1192 				 (enh_size_mult >> (i*8)) & 0xFF);
1193 		if (err)
1194 			return err;
1195 	}
1196 	for (pidx = 0; pidx < 4; pidx++) {
1197 		for (i = 0; i < 3; i++) {
1198 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1199 					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1200 					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1201 			if (err)
1202 				return err;
1203 		}
1204 	}
1205 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1206 			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1207 	if (err)
1208 		return err;
1209 
1210 	if (mode == MMC_HWPART_CONF_SET)
1211 		return 0;
1212 
1213 	/* The WR_REL_SET is a write-once register but shall be
1214 	 * written before setting PART_SETTING_COMPLETED. As it is
1215 	 * write-once we can only write it when completing the
1216 	 * partitioning. */
1217 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1218 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1219 				 EXT_CSD_WR_REL_SET, wr_rel_set);
1220 		if (err)
1221 			return err;
1222 	}
1223 
1224 	/* Setting PART_SETTING_COMPLETED confirms the partition
1225 	 * configuration but it only becomes effective after power
1226 	 * cycle, so we do not adjust the partition related settings
1227 	 * in the mmc struct. */
1228 
1229 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1230 			 EXT_CSD_PARTITION_SETTING,
1231 			 EXT_CSD_PARTITION_SETTING_COMPLETED);
1232 	if (err)
1233 		return err;
1234 
1235 	return 0;
1236 }
1237 #endif
1238 
1239 #if !CONFIG_IS_ENABLED(DM_MMC)
mmc_getcd(struct mmc * mmc)1240 int mmc_getcd(struct mmc *mmc)
1241 {
1242 	int cd;
1243 
1244 	cd = board_mmc_getcd(mmc);
1245 
1246 	if (cd < 0) {
1247 		if (mmc->cfg->ops->getcd)
1248 			cd = mmc->cfg->ops->getcd(mmc);
1249 		else
1250 			cd = 1;
1251 	}
1252 
1253 	return cd;
1254 }
1255 #endif
1256 
1257 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * sd_switch() - issue SD CMD6 (SWITCH_FUNC) and read back the 64-byte
 * status block
 *
 * @mode:  SD_SWITCH_CHECK or SD_SWITCH_SWITCH (goes into bit 31)
 * @group: function group (one nibble per group in the argument)
 * @value: function number to check/select within @group
 * @resp:  64-byte buffer receiving the switch status data
 */
static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	u32 arg;

	/* All groups default to 0xf ("keep current function"). */
	arg = (mode << 31) | 0xffffff;
	arg &= ~(0xfU << (group * 4));
	arg |= (u32)value << (group * 4);

	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = arg;

	/* The card answers with a single 64-byte status data block. */
	data.dest = (char *)resp;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	return mmc_send_cmd(mmc, &cmd, &data);
}
1277 
/*
 * sd_get_capabilities() - probe an SD card's bus widths and speed modes
 *
 * Reads the SCR (via ACMD51) to determine the SD version and 4-bit
 * support, then uses CMD6 in check mode to detect high-speed and, when
 * enabled, UHS modes. Results accumulate in mmc->card_caps.
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	/* Baseline: every card can do 1-bit legacy transfers. */
	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	/* SPI mode has no wider bus and no CMD6 switching to probe. */
	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);

	if (err)
		return err;

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SCR bits 59:56 encode the SD spec version. */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* SD_SPEC3 bit distinguishes 3.0 from plain 2.0. */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* Poll CMD6 check mode until the high-speed function is ready. */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* Record high-speed capability if the card advertises it. */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Versions before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	/* Group 1 support bits also carry the UHS mode flags. */
	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}
1383 
sd_set_card_speed(struct mmc * mmc,enum bus_mode mode)1384 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1385 {
1386 	int err;
1387 
1388 	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1389 	int speed;
1390 
1391 	/* SD version 1.00 and 1.01 does not support CMD 6 */
1392 	if (mmc->version == SD_VERSION_1_0)
1393 		return 0;
1394 
1395 	switch (mode) {
1396 	case MMC_LEGACY:
1397 		speed = UHS_SDR12_BUS_SPEED;
1398 		break;
1399 	case SD_HS:
1400 		speed = HIGH_SPEED_BUS_SPEED;
1401 		break;
1402 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1403 	case UHS_SDR12:
1404 		speed = UHS_SDR12_BUS_SPEED;
1405 		break;
1406 	case UHS_SDR25:
1407 		speed = UHS_SDR25_BUS_SPEED;
1408 		break;
1409 	case UHS_SDR50:
1410 		speed = UHS_SDR50_BUS_SPEED;
1411 		break;
1412 	case UHS_DDR50:
1413 		speed = UHS_DDR50_BUS_SPEED;
1414 		break;
1415 	case UHS_SDR104:
1416 		speed = UHS_SDR104_BUS_SPEED;
1417 		break;
1418 #endif
1419 	default:
1420 		return -EINVAL;
1421 	}
1422 
1423 	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1424 	if (err)
1425 		return err;
1426 
1427 	if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1428 		return -ENOTSUPP;
1429 
1430 	return 0;
1431 }
1432 
sd_select_bus_width(struct mmc * mmc,int w)1433 static int sd_select_bus_width(struct mmc *mmc, int w)
1434 {
1435 	int err;
1436 	struct mmc_cmd cmd;
1437 
1438 	if ((w != 4) && (w != 1))
1439 		return -EINVAL;
1440 
1441 	cmd.cmdidx = MMC_CMD_APP_CMD;
1442 	cmd.resp_type = MMC_RSP_R1;
1443 	cmd.cmdarg = mmc->rca << 16;
1444 
1445 	err = mmc_send_cmd(mmc, &cmd, NULL);
1446 	if (err)
1447 		return err;
1448 
1449 	cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1450 	cmd.resp_type = MMC_RSP_R1;
1451 	if (w == 4)
1452 		cmd.cmdarg = 2;
1453 	else if (w == 1)
1454 		cmd.cmdarg = 0;
1455 	err = mmc_send_cmd(mmc, &cmd, NULL);
1456 	if (err)
1457 		return err;
1458 
1459 	return 0;
1460 }
1461 #endif
1462 
1463 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * sd_read_ssr() - read the SD Status register (ACMD13) and extract the
 * allocation unit size and erase timing parameters into mmc->ssr
 *
 * Used by the erase path to size and time erase operations.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	/* AU_SIZE code -> allocation unit size in 512-byte sectors. */
	static const unsigned int sd_au_size[] = {
		0,		SZ_16K / 512,		SZ_32K / 512,
		SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
		SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
		SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
		SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,
		SZ_64M / 512,
	};
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	unsigned int au, eo, et, es;

	/* ACMD13 needs a preceding CMD55; some cards need retries here. */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_APP_CMD, 4);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	/* SD Status is a 64-byte data block. */
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
	if (err)
		return err;

	/* The register arrives big-endian; normalize word by word. */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE field; codes above 9 are only defined for SD 3.0+. */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		/* ERASE_SIZE (es), ERASE_TIMEOUT (et), ERASE_OFFSET (eo) */
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* Timeout per AU in ms = et(sec) * 1000 / es(AUs). */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		pr_debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1521 #endif
/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
/* NOTE(review): presumably indexed by the CSD TRAN_SPEED frequency-unit
 * field — confirm against the CSD parsing in mmc_startup(). */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};
1530 
/* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
 * to platforms without floating point.
 * Entry 0 is reserved per the CSD TRAN_SPEED encoding.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1552 
/*
 * bus_width() - translate a single bus-width capability flag into a
 * data-line count
 *
 * @cap: exactly one of MMC_MODE_8BIT, MMC_MODE_4BIT or MMC_MODE_1BIT
 * Return: 8, 4 or 1 respectively; 0 (with a warning) for any other value.
 */
static inline int bus_width(uint cap)
{
	if (cap == MMC_MODE_8BIT)
		return 8;
	if (cap == MMC_MODE_4BIT)
		return 4;
	if (cap == MMC_MODE_1BIT)
		return 1;
	pr_warn("invalid bus width capability 0x%x\n", cap);
	return 0;
}
1564 
1565 #if !CONFIG_IS_ENABLED(DM_MMC)
1566 #ifdef MMC_SUPPORTS_TUNING
/* Tuning requires driver-model host support; not available here. */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}
1571 #endif
1572 
mmc_set_ios(struct mmc * mmc)1573 static int mmc_set_ios(struct mmc *mmc)
1574 {
1575 	int ret = 0;
1576 
1577 	if (mmc->cfg->ops->set_ios)
1578 		ret = mmc->cfg->ops->set_ios(mmc);
1579 
1580 	return ret;
1581 }
1582 
mmc_host_power_cycle(struct mmc * mmc)1583 static int mmc_host_power_cycle(struct mmc *mmc)
1584 {
1585 	int ret = 0;
1586 
1587 	if (mmc->cfg->ops->host_power_cycle)
1588 		ret = mmc->cfg->ops->host_power_cycle(mmc);
1589 
1590 	return ret;
1591 }
1592 #endif
1593 
/*
 * mmc_set_clock() - set (or gate) the bus clock
 *
 * @clock:   requested rate in Hz; clamped to the host's [f_min, f_max]
 *           range unless the clock is being disabled
 * @disable: true to gate the clock, false to enable it at @clock
 */
int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
{
	uint rate = clock;

	if (!disable) {
		if (rate > mmc->cfg->f_max)
			rate = mmc->cfg->f_max;
		if (rate < mmc->cfg->f_min)
			rate = mmc->cfg->f_min;
	}

	mmc->clock = rate;
	mmc->clk_disable = disable;

	debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", rate);

	return mmc_set_ios(mmc);
}
1611 
/* Record the new bus width and push it to the host controller. */
static int mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	return mmc_set_ios(mmc);
}
1618 
1619 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * helper function to display the capabilities in a human
 * friendly manner. The capabilities include bus width and
 * supported modes.
 */
void mmc_dump_capabilities(const char *text, uint caps)
{
	enum bus_mode mode;

	pr_debug("%s: widths [", text);
	if (caps & MMC_MODE_8BIT)
		pr_debug("8, ");
	if (caps & MMC_MODE_4BIT)
		pr_debug("4, ");
	if (caps & MMC_MODE_1BIT)
		pr_debug("1, ");
	/* "\b\b" backspaces over the trailing ", " before closing. */
	pr_debug("\b\b] modes [");
	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
		if (MMC_CAP(mode) & caps)
			pr_debug("%s, ", mmc_mode_name(mode));
	pr_debug("\b\b]\n");
}
1642 #endif
1643 
/*
 * One candidate bus configuration: a speed mode, the bus widths that may
 * be used with it and, when tuning support is compiled in, the tuning
 * command the mode requires (0 when no tuning is needed).
 */
struct mode_width_tuning {
	enum bus_mode mode;	/* bus speed mode to try */
	uint widths;		/* OR of MMC_MODE_xBIT width flags */
#ifdef MMC_SUPPORTS_TUNING
	uint tuning;		/* tuning opcode, 0 if none */
#endif
};
1651 
1652 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
mmc_voltage_to_mv(enum mmc_voltage voltage)1653 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1654 {
1655 	switch (voltage) {
1656 	case MMC_SIGNAL_VOLTAGE_000: return 0;
1657 	case MMC_SIGNAL_VOLTAGE_330: return 3300;
1658 	case MMC_SIGNAL_VOLTAGE_180: return 1800;
1659 	case MMC_SIGNAL_VOLTAGE_120: return 1200;
1660 	}
1661 	return -EINVAL;
1662 }
1663 
/*
 * mmc_set_signal_voltage() - change the I/O signalling voltage
 *
 * Records the requested level and pushes it to the host via set_ios.
 * A no-op when the bus is already at the requested level.
 */
static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	int ret;

	if (mmc->signal_voltage == signal_voltage)
		return 0;

	mmc->signal_voltage = signal_voltage;
	ret = mmc_set_ios(mmc);
	if (ret)
		pr_debug("unable to set voltage (err %d)\n", ret);

	return ret;
}
1678 #else
/* Without MMC_IO_VOLTAGE the signalling level is fixed; succeed as no-op. */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	return 0;
}
1683 #endif
1684 
1685 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * SD bus modes in order of preference (fastest first); iterated by
 * for_each_sd_mode_by_pref() during mode selection.
 */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	/* MMC_LEGACY last: the guaranteed fallback. */
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1723 
/* Iterate sd_modes_by_pref, visiting only modes present in @caps. */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1729 
/*
 * sd_select_mode_and_width() - pick and configure the best (mode, width)
 * pair supported by both the SD card and the host
 *
 * Tries combinations fastest-first; on any failure reverts to a safe
 * legacy configuration and continues with the next candidate.
 *
 * Return: 0 on success, -ENOTSUPP when no combination works.
 */
static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	/* Per-mode width candidates, widest first. */
	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
	const struct mode_width_tuning *mwt;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* UHS needs the card to have accepted 1.8V signalling (S18R). */
	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
#else
	bool uhs_en = false;
#endif
	uint caps;

#ifdef DEBUG
	mmc_dump_capabilities("sd card", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* SPI hosts run 1-bit legacy only; nothing to negotiate. */
	if (mmc_host_is_spi(mmc)) {
		mmc_set_bus_width(mmc, 1);
		mmc_select_mode(mmc, MMC_LEGACY);
		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
#if CONFIG_IS_ENABLED(MMC_WRITE)
		err = sd_read_ssr(mmc);
		if (err)
			pr_warn("unable to read ssr\n");
#endif
		return 0;
	}

	/* Restrict card's capabilities by what the host can do */
	caps = card_caps & mmc->host_caps;

	if (!uhs_en)
		caps &= ~UHS_CAPS;

	for_each_sd_mode_by_pref(caps, mwt) {
		uint *w;

		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
			if (*w & caps & mwt->widths) {
				pr_debug("trying mode %s width %d (at %d MHz)\n",
					 mmc_mode_name(mwt->mode),
					 bus_width(*w),
					 mmc_mode2freq(mmc, mwt->mode) / 1000000);

				/* configure the bus width (card + host) */
				err = sd_select_bus_width(mmc, bus_width(*w));
				if (err)
					goto error;
				mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card) */
				err = sd_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);

#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed\n");
						goto error;
					}
				}
#endif

#if CONFIG_IS_ENABLED(MMC_WRITE)
				/* SSR read doubles as a transfer sanity check. */
				err = sd_read_ssr(mmc);
				if (err)
					pr_warn("unable to read ssr\n");
#endif
				if (!err)
					return 0;

error:
				/* revert to a safer bus speed */
				mmc_select_mode(mmc, MMC_LEGACY);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);
			}
		}
	}

	pr_err("unable to select a mode\n");
	return -ENOTSUPP;
}
1823 
/*
 * Read and compare the part of ext csd that is constant.
 * This can be used to check that the transfer is working
 * as expected.
 */
static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
{
	int err;
	const u8 *ext_csd = mmc->ext_csd;
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	/* Pre-4.0 cards have no EXT_CSD; nothing to verify. */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	err = mmc_send_ext_csd(mmc, test_csd);
	if (err)
		return err;

	/* Only compare read only fields */
	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
	    ext_csd[EXT_CSD_REV]
		== test_csd[EXT_CSD_REV] &&
	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
		return 0;

	/* Mismatch means the bus is corrupting data at this speed. */
	return -EBADMSG;
}
1857 
1858 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - select a signalling voltage acceptable to
 * the card for @mode, preferring the lowest candidate
 *
 * Builds a mask of voltages the card type supports for the target mode,
 * then tries each voltage allowed by @allowed_mask in turn.
 *
 * Return: 0 once a voltage is set, -ENOTSUPP when none could be applied.
 */
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	switch (mode) {
	case MMC_HS_400_ES:
	case MMC_HS_400:
	case MMC_HS_200:
		/* HS200/HS400 cards signal at 1.8V or 1.2V only. */
		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
		    EXT_CSD_CARD_TYPE_HS400_1_8V))
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
		    EXT_CSD_CARD_TYPE_HS400_1_2V))
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		/* ffs picks the lowest set bit; NOTE(review): assumes the
		 * MMC_SIGNAL_VOLTAGE_* flags are ordered low-to-high —
		 * confirm against their definition in mmc.h. */
		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc,  best_match))
			return 0;

		/* That level failed; drop it and try the next. */
		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
1899 #else
/* Without MMC_IO_VOLTAGE the voltage cannot be changed; succeed as no-op. */
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
1905 #endif
1906 
/*
 * eMMC bus modes in order of preference (fastest first); iterated by
 * for_each_mmc_mode_by_pref() during mode selection.
 */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	{
		.mode = MMC_HS_400_ES,
		.widths = MMC_MODE_8BIT,
	},
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	{
		.mode = MMC_HS_400,
		.widths = MMC_MODE_8BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	/* MMC_LEGACY last: the guaranteed fallback. */
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1945 
/* Iterate mmc_modes_by_pref, visiting only modes present in @caps. */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	    mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1951 
/*
 * Mapping from a width capability flag (and whether DDR is in use) to
 * the value to program into the EXT_CSD BUS_WIDTH byte; ordered widest
 * first so for_each_supported_width() prefers wider buses.
 */
static const struct ext_csd_bus_width {
	uint cap;		/* MMC_MODE_xBIT capability flag */
	bool is_ddr;		/* entry applies to DDR modes */
	uint ext_csd_bits;	/* EXT_CSD BUS_WIDTH register value */
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};
1963 
1964 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_select_hs400() - bring the card up in HS400 mode
 *
 * HS400 cannot be entered directly: the sequence is HS200 (for tuning),
 * back to HS, switch the bus to 8-bit DDR, then HS400 timing. The order
 * of these steps is mandated by the eMMC specification.
 */
static int mmc_select_hs400(struct mmc *mmc)
{
	int err;

	/* Set timing to HS200 for tuning */
	err = mmc_set_card_speed(mmc, MMC_HS_200, false);
	if (err)
		return err;

	/* configure the bus mode (host) */
	mmc_select_mode(mmc, MMC_HS_200);
	mmc_set_clock(mmc, mmc->tran_speed, false);

	/* execute tuning if needed */
	mmc->hs400_tuning = 1;
	err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
	mmc->hs400_tuning = 0;
	if (err) {
		debug("tuning failed\n");
		return err;
	}

	/* Set back to HS */
	mmc_set_card_speed(mmc, MMC_HS, true);

	/* Host-specific preparation before the DDR switch. */
	err = mmc_hs400_prepare_ddr(mmc);
	if (err)
		return err;

	/* 8-bit DDR bus is a prerequisite for HS400 timing. */
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
	if (err)
		return err;

	err = mmc_set_card_speed(mmc, MMC_HS_400, false);
	if (err)
		return err;

	mmc_select_mode(mmc, MMC_HS_400);
	err = mmc_set_clock(mmc, mmc->tran_speed, false);
	if (err)
		return err;

	return 0;
}
2010 #else
/* HS400 support not compiled in. */
static int mmc_select_hs400(struct mmc *mmc)
{
	return -ENOTSUPP;
}
2015 #endif
2016 
2017 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2018 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Enhanced strobe needs driver-model host support; unavailable here. */
static int mmc_set_enhanced_strobe(struct mmc *mmc)
{
	return -ENOTSUPP;
}
2023 #endif
/*
 * mmc_select_hs400es() - bring the card up in HS400 Enhanced Strobe mode
 *
 * Unlike plain HS400, no HS200 tuning pass is needed: switch to HS,
 * enable the 8-bit DDR bus with the strobe bit, set HS400ES timing and
 * finally enable enhanced strobe on the host side.
 */
static int mmc_select_hs400es(struct mmc *mmc)
{
	int err;

	err = mmc_set_card_speed(mmc, MMC_HS, true);
	if (err)
		return err;

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
			 EXT_CSD_BUS_WIDTH_STROBE);
	if (err) {
		printf("switch to bus width for hs400 failed\n");
		return err;
	}
	/* TODO: driver strength */
	err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
	if (err)
		return err;

	mmc_select_mode(mmc, MMC_HS_400_ES);
	err = mmc_set_clock(mmc, mmc->tran_speed, false);
	if (err)
		return err;

	return mmc_set_enhanced_strobe(mmc);
}
2051 #else
/* HS400 Enhanced Strobe support not compiled in. */
static int mmc_select_hs400es(struct mmc *mmc)
{
	return -ENOTSUPP;
}
2056 #endif
2057 
/* Iterate ext_csd_bus_width entries matching @caps and the @ddr flag. */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	    ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
2063 
/*
 * mmc_select_mode_and_width() - pick and configure the best (mode, width,
 * voltage) combination for an eMMC card
 *
 * Tries combinations fastest-first; each candidate is validated by
 * re-reading EXT_CSD, and any failure reverts to a safe 1-bit legacy
 * configuration before the next candidate is tried.
 *
 * Return: 0 on success, -ENOTSUPP when no combination works.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err = 0;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* SPI hosts run 1-bit legacy only; nothing to negotiate. */
	if (mmc_host_is_spi(mmc)) {
		mmc_set_bus_width(mmc, 1);
		mmc_select_mode(mmc, MMC_LEGACY);
		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
		return 0;
	}

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		pr_debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	/*
	 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
	 * before doing anything else, since a transition from either of
	 * the HS200/HS400 mode directly to legacy mode is not supported.
	 */
	if (mmc->selected_mode == MMC_HS_200 ||
	    mmc->selected_mode == MMC_HS_400)
		mmc_set_card_speed(mmc, MMC_HS, true);
	else
#endif
		mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			pr_debug("trying mode %s width %d (at %d MHz)\n",
				 mmc_mode_name(mwt->mode),
				 bus_width(ecbw->cap),
				 mmc_mode2freq(mmc, mwt->mode) / 1000000);
			/* Remember the voltage so a failure can restore it. */
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_BUS_WIDTH,
				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			/* HS400/HS400ES need their own multi-step bring-up. */
			if (mwt->mode == MMC_HS_400) {
				err = mmc_select_hs400(mmc);
				if (err) {
					printf("Select HS400 failed %d\n", err);
					goto error;
				}
			} else if (mwt->mode == MMC_HS_400_ES) {
				err = mmc_select_hs400es(mmc);
				if (err) {
					printf("Select HS400ES failed %d\n",
					       err);
					goto error;
				}
			} else {
				/* configure the bus speed (card) */
				err = mmc_set_card_speed(mmc, mwt->mode, false);
				if (err)
					goto error;

				/*
				 * configure the bus width AND the ddr mode
				 * (card). The host side will be taken care
				 * of in the next step
				 */
				if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
					err = mmc_switch(mmc,
							 EXT_CSD_CMD_SET_NORMAL,
							 EXT_CSD_BUS_WIDTH,
							 ecbw->ext_csd_bits);
					if (err)
						goto error;
				}

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
					      MMC_CLK_ENABLE);
#ifdef MMC_SUPPORTS_TUNING

				/* execute tuning if needed */
				if (mwt->tuning) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed : %d\n", err);
						goto error;
					}
				}
#endif
			}

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode : %d\n", err);

	return -ENOTSUPP;
}
2199 #endif
2200 
2201 #if CONFIG_IS_ENABLED(MMC_TINY)
/* Static EXT_CSD backing store for the MMC_TINY single-card build. */
DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
2203 #endif
2204 
/*
 * mmc_startup_v4() - read and decode the EXT_CSD of a v4+ eMMC device
 *
 * Reads the 512-byte EXT_CSD register, caches it in mmc->ext_csd, and
 * derives from it the device version, user/boot/RPMB/GP capacities,
 * hardware partition information, erase group size and the write
 * reliability setting.
 *
 * @mmc: device to query (must already be in transfer state)
 * Return: 0 on success (also for SD cards and pre-4.0 MMC, which have no
 *	   EXT_CSD and are skipped), -EINVAL for an unknown EXT_CSD
 *	   revision, -ENOMEM on allocation failure, or another negative
 *	   errno on command failure.
 */
static int mmc_startup_v4(struct mmc *mmc)
{
	int err, i;
	u64 capacity;
	bool has_parts = false;
	bool part_completed;
	/* Indexed by the raw EXT_CSD_REV byte to get the version code */
	static const u32 mmc_versions[] = {
		MMC_VERSION_4,
		MMC_VERSION_4_1,
		MMC_VERSION_4_2,
		MMC_VERSION_4_3,
		MMC_VERSION_4_4,
		MMC_VERSION_4_41,
		MMC_VERSION_4_5,
		MMC_VERSION_5_0,
		MMC_VERSION_5_1
	};

#if CONFIG_IS_ENABLED(MMC_TINY)
	/* MMC_TINY: reuse the single static cache-aligned backup buffer */
	u8 *ext_csd = ext_csd_bkup;

	if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd)
		memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		goto error;

	/* store the ext csd for future reference */
	if (!mmc->ext_csd)
		mmc->ext_csd = ext_csd;
#else
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
		return 0;

	/* check ext_csd version and capacity */
	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		goto error;

	/* store a heap copy of the ext csd for future reference */
	if (!mmc->ext_csd)
		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
	if (!mmc->ext_csd)
		return -ENOMEM;
	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
#endif
	if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
		return -EINVAL;

	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];

	if (mmc->version >= MMC_VERSION_4_2) {
		/*
		 * According to the JEDEC Standard, the value of
		 * ext_csd's capacity is valid if the value is more
		 * than 2GB
		 */
		/* SEC_CNT is a little-endian 32-bit sector count */
		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
				| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
				| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
				| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		capacity *= MMC_MAX_BLOCK_LEN;
		if ((capacity >> 20) > 2 * 1024)
			mmc->capacity_user = capacity;
	}

	if (mmc->version >= MMC_VERSION_4_5)
		mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];

	/* The partition data may be non-zero but it is only
	 * effective if PARTITION_SETTING_COMPLETED is set in
	 * EXT_CSD, so ignore any data if this bit is not set,
	 * except for enabling the high-capacity group size
	 * definition (see below).
	 */
	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
			    EXT_CSD_PARTITION_SETTING_COMPLETED);

	mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
	/* Some eMMC set the value too low so set a minimum */
	if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
		mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;

	/* store the partition info of emmc */
	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
	    ext_csd[EXT_CSD_BOOT_MULT])
		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
	if (part_completed &&
	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];

	/* boot/RPMB multipliers are in units of 128 KiB (<< 17) */
	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

	/* decode the four general-purpose partition size multipliers */
	for (i = 0; i < 4; i++) {
		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
		uint mult = (ext_csd[idx + 2] << 16) +
			(ext_csd[idx + 1] << 8) + ext_csd[idx];
		if (mult)
			has_parts = true;
		if (!part_completed)
			continue;
		/* GP size = mult * erase-group size * WP-group size * 512 KiB */
		mmc->capacity_gp[i] = mult;
		mmc->capacity_gp[i] *=
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->capacity_gp[i] <<= 19;
	}

#ifndef CONFIG_SPL_BUILD
	if (part_completed) {
		/* enhanced user area size and start, same unit scheme */
		mmc->enh_user_size =
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
			ext_csd[EXT_CSD_ENH_SIZE_MULT];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->enh_user_size <<= 19;
		mmc->enh_user_start =
			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
			ext_csd[EXT_CSD_ENH_START_ADDR];
		/* high-capacity devices address in 512-byte sectors */
		if (mmc->high_capacity)
			mmc->enh_user_start <<= 9;
	}
#endif

	/*
	 * Host needs to enable ERASE_GRP_DEF bit if device is
	 * partitioned. This bit will be lost every time after a reset
	 * or power off. This will affect erase size.
	 */
	if (part_completed)
		has_parts = true;
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
		has_parts = true;
	if (has_parts) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			goto error;

		/* mirror the switch into the local copy for the check below */
		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
	}

	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
#if CONFIG_IS_ENABLED(MMC_WRITE)
		/* Read out group size from ext_csd */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
#endif
		/*
		 * if high capacity and partition setting completed
		 * SEC_COUNT is valid even if it is smaller than 2 GiB
		 * JEDEC Standard JESD84-B45, 6.2.4
		 */
		if (mmc->high_capacity && part_completed) {
			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
				(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
				(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
				(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
			capacity *= MMC_MAX_BLOCK_LEN;
			mmc->capacity_user = capacity;
		}
	}
#if CONFIG_IS_ENABLED(MMC_WRITE)
	else {
		/* Calculate the group size from the csd value. */
		int erase_gsz, erase_gmul;

		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
		mmc->erase_grp_size = (erase_gsz + 1)
			* (erase_gmul + 1);
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
	/* write-protect group size, in 512-byte blocks */
	mmc->hc_wp_grp_size = 1024
		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
#endif

	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];

	return 0;
error:
	/* drop the cached EXT_CSD; it is a static buffer under MMC_TINY */
	if (mmc->ext_csd) {
#if !CONFIG_IS_ENABLED(MMC_TINY)
		free(mmc->ext_csd);
#endif
		mmc->ext_csd = NULL;
	}
	return err;
}
2410 
/*
 * mmc_startup() - bring an identified card into transfer state
 *
 * Runs the card identification sequence (CID, relative address, CSD),
 * decodes the CSD into version/speed/block-length/capacity fields,
 * selects the card, parses the EXT_CSD for v4+ eMMC, negotiates the best
 * bus speed mode and width, and finally fills in the block device
 * descriptor.
 *
 * @mmc: device that has completed the operating-condition handshake
 * Return: 0 on success, negative errno otherwise
 */
static int mmc_startup(struct mmc *mmc)
{
	int err, i;
	uint mult, freq;
	u64 cmult, csize;
	struct mmc_cmd cmd;
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 1;
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (err)
			return err;
	}
#endif

	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	/* retried up to 4 times for cards with MMC_QUIRK_RETRY_SEND_CID */
	err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_SEND_CID, 4);
	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* SD cards assign their own RCA; read it back */
		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}

	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* derive the MMC spec version from the CSD structure field */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	mmc->legacy_speed = freq * mult;
	mmc_select_mode(mmc, MMC_LEGACY);

	/* DSR implemented flag and read block length from the CSD */
	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
#if CONFIG_IS_ENABLED(MMC_WRITE)

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
#endif

	/* compute capacity: C_SIZE/C_SIZE_MULT layout differs by capacity class */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	/* clamp block lengths to what the framework supports */
	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

#if CONFIG_IS_ENABLED(MMC_WRITE)
	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
#endif

	/* program the DSR if the card implements one and a value is set */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			pr_warn("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
#if CONFIG_IS_ENABLED(MMC_WRITE)
	mmc->erase_grp_size = 1;
#endif
	mmc->part_config = MMCPART_NOAVAILABLE;

	/* parse EXT_CSD for v4+ eMMC (no-op for SD and older MMC) */
	err = mmc_startup_v4(mmc);
	if (err)
		return err;

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_TINY)
	/* MMC_TINY stays in 1-bit legacy mode; no capability negotiation */
	mmc_set_clock(mmc, mmc->legacy_speed, false);
	mmc_select_mode(mmc, MMC_LEGACY);
	mmc_set_bus_width(mmc, 1);
#else
	if (IS_SD(mmc)) {
		err = sd_get_capabilities(mmc);
		if (err)
			return err;
		err = sd_select_mode_and_width(mmc, mmc->card_caps);
	} else {
		err = mmc_get_capabilities(mmc);
		if (err)
			return err;
		err = mmc_select_mode_and_width(mmc, mmc->card_caps);
	}
#endif
	if (err)
		return err;

	mmc->best_mode = mmc->selected_mode;

	/* Fix the block length for DDR mode */
	if (mmc->ddr_mode) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
#if CONFIG_IS_ENABLED(MMC_WRITE)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
#endif
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!CONFIG_IS_ENABLED(USE_TINY_PRINTF))
	/* decode manufacturer/serial/name/revision from the CID */
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif

#if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
	part_init(bdesc);
#endif

	return 0;
}
2640 
mmc_send_if_cond(struct mmc * mmc)2641 static int mmc_send_if_cond(struct mmc *mmc)
2642 {
2643 	struct mmc_cmd cmd;
2644 	int err;
2645 
2646 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2647 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2648 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2649 	cmd.resp_type = MMC_RSP_R7;
2650 
2651 	err = mmc_send_cmd(mmc, &cmd, NULL);
2652 
2653 	if (err)
2654 		return err;
2655 
2656 	if ((cmd.response[0] & 0xff) != 0xaa)
2657 		return -EOPNOTSUPP;
2658 	else
2659 		mmc->version = SD_VERSION_2;
2660 
2661 	return 0;
2662 }
2663 
2664 #if !CONFIG_IS_ENABLED(DM_MMC)
2665 /* board-specific MMC power initializations. */
__weak void board_mmc_power_init(void)
{
	/* Default: no board-specific power setup; boards may override */
}
2669 #endif
2670 
/*
 * mmc_power_init() - locate the card power supplies for a host
 *
 * With driver model and regulators enabled, looks up the optional
 * "vmmc-supply" (card power) and "vqmmc-supply" (I/O voltage) regulators
 * from the device tree; a missing supply is only logged, not fatal.
 * Without driver model, falls back to the board_mmc_power_init() hook.
 *
 * @mmc: device whose supplies should be resolved
 * Return: always 0
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		pr_debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2696 
2697 /*
2698  * put the host in the initial state:
2699  * - turn on Vdd (card power supply)
2700  * - configure the bus width and clock to minimal values
2701  */
mmc_set_initial_state(struct mmc * mmc)2702 static void mmc_set_initial_state(struct mmc *mmc)
2703 {
2704 	int err;
2705 
2706 	/* First try to set 3.3V. If it fails set to 1.8V */
2707 	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2708 	if (err != 0)
2709 		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2710 	if (err != 0)
2711 		pr_warn("mmc: failed to set signal voltage\n");
2712 
2713 	mmc_select_mode(mmc, MMC_LEGACY);
2714 	mmc_set_bus_width(mmc, 1);
2715 	mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
2716 }
2717 
/*
 * mmc_power_on() - enable the card power supply (VMMC), if one is known
 *
 * @mmc: device to power up
 * Return: 0 on success or when no regulator control is available;
 *	   negative errno if enabling the regulator fails.
 */
static int mmc_power_on(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, true);

		/* -EACCES is tolerated (presumably a fixed/always-on
		 * regulator that cannot be controlled) - TODO confirm */
		if (ret && ret != -EACCES) {
			printf("Error enabling VMMC supply : %d\n", ret);
			return ret;
		}
	}
#endif
	return 0;
}
2732 
/*
 * mmc_power_off() - gate the clock and disable the card power supply
 *
 * @mmc: device to power down
 * Return: 0 on success or when no regulator control is available;
 *	   negative errno if disabling the regulator fails.
 */
static int mmc_power_off(struct mmc *mmc)
{
	mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, false);

		/* -EACCES is tolerated (presumably a fixed/always-on
		 * regulator that cannot be controlled) - TODO confirm */
		if (ret && ret != -EACCES) {
			pr_debug("Error disabling VMMC supply : %d\n", ret);
			return ret;
		}
	}
#endif
	return 0;
}
2748 
/*
 * mmc_power_cycle() - power the card off, let it discharge, power it on
 *
 * @mmc: device to cycle
 * Return: 0 on success, negative errno from the first failing step
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int err;

	err = mmc_power_off(mmc);
	if (!err)
		err = mmc_host_power_cycle(mmc);
	if (err)
		return err;

	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);

	return mmc_power_on(mmc);
}
2768 
/*
 * mmc_get_op_cond() - power up the card and read its operating conditions
 *
 * Initializes power supplies, power-cycles the card (disabling UHS modes
 * when a full power cycle is not possible, since UHS error recovery needs
 * one), resets the card with CMD0 and then negotiates the operating
 * conditions: first as an SD card (ACMD41), falling back to eMMC (CMD1)
 * on timeout.
 *
 * @mmc: device to bring up
 * Return: 0 on success, -EOPNOTSUPP when the card answers neither
 *	   protocol, or another negative errno.
 */
int mmc_get_op_cond(struct mmc *mmc)
{
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	if (mmc->has_init)
		return 0;

	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID |
		      MMC_QUIRK_RETRY_APP_CMD;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/*
	 * Re-initialization is needed to clear old configuration for
	 * mmc rescan.
	 */
	err = mmc_reinit(mmc);
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
#endif
	if (err)
		return err;
	mmc->ddr_mode = 0;

retry:
	mmc_set_initial_state(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0 */
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/*
	 * Test for SD version 2. The result is intentionally not checked
	 * here: err is overwritten below, and a failure merely means the
	 * card is not SD 2.x.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		/* UHS negotiation failed: retry once without UHS */
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Card did not respond to voltage select! : %d\n", err);
#endif
			return -EOPNOTSUPP;
		}
	}

	return err;
}
2853 
mmc_start_init(struct mmc * mmc)2854 int mmc_start_init(struct mmc *mmc)
2855 {
2856 	bool no_card;
2857 	int err = 0;
2858 
2859 	/*
2860 	 * all hosts are capable of 1 bit bus-width and able to use the legacy
2861 	 * timings.
2862 	 */
2863 	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2864 			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2865 #if CONFIG_IS_ENABLED(DM_MMC)
2866 	mmc_deferred_probe(mmc);
2867 #endif
2868 #if !defined(CONFIG_MMC_BROKEN_CD)
2869 	no_card = mmc_getcd(mmc) == 0;
2870 #else
2871 	no_card = 0;
2872 #endif
2873 #if !CONFIG_IS_ENABLED(DM_MMC)
2874 	/* we pretend there's no card when init is NULL */
2875 	no_card = no_card || (mmc->cfg->ops->init == NULL);
2876 #endif
2877 	if (no_card) {
2878 		mmc->has_init = 0;
2879 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2880 		pr_err("MMC: no card present\n");
2881 #endif
2882 		return -ENOMEDIUM;
2883 	}
2884 
2885 	err = mmc_get_op_cond(mmc);
2886 
2887 	if (!err)
2888 		mmc->init_in_progress = 1;
2889 
2890 	return err;
2891 }
2892 
mmc_complete_init(struct mmc * mmc)2893 static int mmc_complete_init(struct mmc *mmc)
2894 {
2895 	int err = 0;
2896 
2897 	mmc->init_in_progress = 0;
2898 	if (mmc->op_cond_pending)
2899 		err = mmc_complete_op_cond(mmc);
2900 
2901 	if (!err)
2902 		err = mmc_startup(mmc);
2903 	if (err)
2904 		mmc->has_init = 0;
2905 	else
2906 		mmc->has_init = 1;
2907 	return err;
2908 }
2909 
/*
 * mmc_init() - fully initialize an MMC device, unless already done
 *
 * Runs mmc_start_init() (unless a preinit already started it) followed by
 * mmc_complete_init(), logging the elapsed time on failure.
 *
 * @mmc: device to initialize
 * Return: 0 on success (or if already initialized), negative errno
 *	   otherwise.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused ulong start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	/* publish this struct mmc through the uclass private data */
	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	/* skip start if a preinit already kicked it off */
	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
2934 
2935 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2936     CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2937     CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
mmc_deinit(struct mmc * mmc)2938 int mmc_deinit(struct mmc *mmc)
2939 {
2940 	u32 caps_filtered;
2941 
2942 	if (!mmc->has_init)
2943 		return 0;
2944 
2945 	if (IS_SD(mmc)) {
2946 		caps_filtered = mmc->card_caps &
2947 			~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2948 			  MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2949 			  MMC_CAP(UHS_SDR104));
2950 
2951 		return sd_select_mode_and_width(mmc, caps_filtered);
2952 	} else {
2953 		caps_filtered = mmc->card_caps &
2954 			~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2955 
2956 		return mmc_select_mode_and_width(mmc, caps_filtered);
2957 	}
2958 }
2959 #endif
2960 
/*
 * mmc_set_dsr() - record the driver stage register value
 *
 * The stored value is programmed into the card during startup.
 *
 * @mmc: device to configure
 * @val: 16-bit DSR value
 * Return: always 0
 */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;

	return 0;
}
2966 
2967 /* CPU-specific MMC initializations */
__weak int cpu_mmc_init(struct bd_info *bis)
{
	/* Default: no CPU-level MMC controllers; SoCs may override */
	return -1;
}
2972 
2973 /* board-specific MMC initializations. */
__weak int board_mmc_init(struct bd_info *bis)
{
	/* Default: no board-level MMC controllers; boards may override */
	return -1;
}
2978 
/* Request that this device start initializing early (see mmc_do_preinit) */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2983 
2984 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_probe() - bind and probe all MMC devices (driver model variant)
 *
 * Walks the MMC uclass sequence numbers to bind devices in order, then
 * probes every device in the uclass. Individual probe failures are
 * reported but do not abort the scan.
 *
 * @bis: board information (unused in the DM path)
 * Return: 0, or a negative errno if the uclass itself cannot be found
 */
static int mmc_probe(struct bd_info *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	/* probe everything; log failures but keep going */
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
3013 #else
/*
 * mmc_probe() - register MMC controllers (non-driver-model variant)
 *
 * Prefers board-level registration; falls back to CPU-level registration
 * only when the board hook reports failure.
 *
 * @bis: board information passed to the init hooks
 * Return: always 0
 */
static int mmc_probe(struct bd_info *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
3021 #endif
3022 
/*
 * mmc_initialize() - one-time bring-up of the MMC subsystem
 *
 * Initializes the legacy device list (when the BLK framework is not
 * used), probes all controllers, prints the device list, and starts any
 * devices flagged for pre-initialization. Safe to call repeatedly; only
 * the first call does any work.
 *
 * @bis: board information passed down to the probe hooks
 * Return: 0 on success, negative errno from mmc_probe() otherwise
 */
int mmc_initialize(struct bd_info *bis)
{
	/* guard: run the subsystem initialization only once */
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	/* kick off init for devices registered with mmc_set_preinit() */
	mmc_do_preinit();
	return 0;
}
3047 
3048 #if CONFIG_IS_ENABLED(DM_MMC)
mmc_init_device(int num)3049 int mmc_init_device(int num)
3050 {
3051 	struct udevice *dev;
3052 	struct mmc *m;
3053 	int ret;
3054 
3055 	ret = uclass_get_device(UCLASS_MMC, num, &dev);
3056 	if (ret)
3057 		return ret;
3058 
3059 	m = mmc_get_mmc_dev(dev);
3060 	if (!m)
3061 		return 0;
3062 	if (m->preinit)
3063 		mmc_start_init(m);
3064 
3065 	return 0;
3066 }
3067 #endif
3068 
3069 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * mmc_set_bkops_enable() - enable manual background operations on an eMMC
 *
 * Reads the EXT_CSD to confirm the device supports background operations
 * and that they are not already on, then sets the BKOPS_EN bit via
 * CMD6 (SWITCH).
 *
 * @mmc: device to configure
 * Return: 0 on success or when already enabled, -EMEDIUMTYPE when the
 *	   device lacks BKOPS support, or another negative errno.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	/* idempotent: nothing to do if the bit is already set */
	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
3101 #endif
3102 
/*
 * mmc_get_env_dev() - device number holding the environment
 *
 * Weak default: CONFIG_SYS_MMC_ENV_DEV when defined, otherwise device 0.
 * Boards may override to pick the environment device at runtime.
 */
__weak int mmc_get_env_dev(void)
{
#ifdef CONFIG_SYS_MMC_ENV_DEV
	return CONFIG_SYS_MMC_ENV_DEV;
#else
	return 0;
#endif
}
3111