// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
 *
 * Copyright 2008 Embedded Alley Solutions, Inc.
 * Copyright 2009-2011 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dma/mxs-dma.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/mxs-spi.h>

#define DRIVER_NAME	"mxs-mmc"

#define MXS_MMC_IRQ_BITS	(BM_SSP_CTRL1_SDIO_IRQ		| \
				 BM_SSP_CTRL1_RESP_ERR_IRQ	| \
				 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ	| \
				 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ	| \
				 BM_SSP_CTRL1_DATA_CRC_IRQ	| \
				 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ	| \
				 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ	| \
				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)

/* card detect polling timeout */
#define MXS_MMC_DETECT_TIMEOUT			(HZ/2)

struct mxs_mmc_host {
	struct mxs_ssp			ssp;

	struct mmc_host			*mmc;
	struct mmc_request		*mrq;
	struct mmc_command		*cmd;
	struct mmc_data			*data;

	unsigned char			bus_width;
	spinlock_t			lock;
	int				sdio_irq_en;
	bool				broken_cd;
};

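/*
 * Card-detect status, in order of precedence: a "broken-cd" property makes
 * the state unknowable (-ENOSYS), a card-detect GPIO (mmc_gpio_get_cd())
 * wins if one is wired up, and otherwise the SSP CARD_DETECT status bit is
 * sampled directly; a cleared bit is treated as "card present", with
 * MMC_CAP2_CD_ACTIVE_HIGH flipping the polarity.
 */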
static int mxs_mmc_get_cd(struct mmc_host *mmc)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;
	int present, ret;

	if (host->broken_cd)
		return -ENOSYS;

	ret = mmc_gpio_get_cd(mmc);
	if (ret >= 0)
		return ret;

	present = mmc->caps & MMC_CAP_NEEDS_POLL ||
		!(readl(ssp->base + HW_SSP_STATUS(ssp)) &
			BM_SSP_STATUS_CARD_DETECT);

	if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
		present = !present;

	return present;
}

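/*
 * Soft-reset the SSP block via stmp_reset_block() and reprogram it for
 * SD/MMC use: SSP_MODE and WORD_LENGTH as required for MMC transfers, DMA
 * enabled, and the error/timeout interrupt sources unmasked.  The timing
 * register gets a maximal timeout and a conservative clock divider; the
 * real bus clock is set later from ->set_ios().
 */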
static int mxs_mmc_reset(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	u32 ctrl0, ctrl1;
	int ret;

	ret = stmp_reset_block(ssp->base);
	if (ret)
		return ret;

	ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
	ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
		BF_SSP(0x7, CTRL1_WORD_LENGTH) |
		BM_SSP_CTRL1_DMA_ENABLE |
		BM_SSP_CTRL1_POLARITY |
		BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
		BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_RESP_ERR_IRQ_EN;

	writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
	       BF_SSP(2, TIMING_CLOCK_DIVIDE) |
	       BF_SSP(0, TIMING_CLOCK_RATE),
	       ssp->base + HW_SSP_TIMING(ssp));

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
	}

	writel(ctrl0, ssp->base + HW_SSP_CTRL0);
	writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
	return 0;
}

static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
			      struct mmc_command *cmd);

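/*
 * Completion path, called from the DMA callback: read back the response
 * registers, then either chain the next command of the request (the real
 * command after CMD23, or the stop command after a data transfer) or hand
 * the finished request back to the MMC core.
 */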
static void mxs_mmc_request_done(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = host->data;
	struct mmc_request *mrq = host->mrq;
	struct mxs_ssp *ssp = &host->ssp;

	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
		if (mmc_resp_type(cmd) & MMC_RSP_136) {
			cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
			cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp));
			cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp));
			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp));
		} else {
			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
		}
	}

	if (cmd == mrq->sbc) {
		/* Finished CMD23, now send actual command. */
		mxs_mmc_start_cmd(host, mrq->cmd);
		return;
	} else if (data) {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
			     data->sg_len, ssp->dma_dir);
		/*
		 * If there was an error on any block, we mark all
		 * data blocks as being in error.
		 */
		if (!data->error)
			data->bytes_xfered = data->blocks * data->blksz;
		else
			data->bytes_xfered = 0;

		host->data = NULL;
		if (data->stop && (data->error || !mrq->sbc)) {
			mxs_mmc_start_cmd(host, mrq->stop);
			return;
		}
	}

	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void mxs_mmc_dma_irq_callback(void *param)
{
	struct mxs_mmc_host *host = param;

	mxs_mmc_request_done(host);
}

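/*
 * SSP error/SDIO interrupt handler: latch CTRL1, acknowledge the handled
 * bits through the STMP "clear" register alias, forward SDIO card
 * interrupts to the core, and translate the error bits into errno values
 * on the current command/data.
 */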
static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
{
	struct mxs_mmc_host *host = dev_id;
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = host->data;
	struct mxs_ssp *ssp = &host->ssp;
	u32 stat;

	spin_lock(&host->lock);

	stat = readl(ssp->base + HW_SSP_CTRL1(ssp));
	writel(stat & MXS_MMC_IRQ_BITS,
	       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);

	spin_unlock(&host->lock);

	if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
		mmc_signal_sdio_irq(host->mmc);

	if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
		cmd->error = -ETIMEDOUT;
	else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
		cmd->error = -EIO;

	if (data) {
		if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
			    BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
			data->error = -ETIMEDOUT;
		else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
			data->error = -EILSEQ;
		else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
			data->error = -EIO;
	}

	return IRQ_HANDLED;
}

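/*
 * Build a slave DMA descriptor.  With host->data set, the data scatterlist
 * is mapped and transferred; otherwise ssp_pio_words[] is handed to the
 * mxs-dma engine as PIO words that program the SSP command registers
 * (CTRL0/CMD0/CMD1).  The completion callback drives
 * mxs_mmc_request_done().
 */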
static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
	struct mxs_mmc_host *host, unsigned long flags)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = host->data;
	struct scatterlist *sgl;
	unsigned int sg_len;

	if (data) {
		/* data */
		dma_map_sg(mmc_dev(host->mmc), data->sg,
			   data->sg_len, ssp->dma_dir);
		sgl = data->sg;
		sg_len = data->sg_len;
	} else {
		/* pio */
		sgl = (struct scatterlist *) ssp->ssp_pio_words;
		sg_len = SSP_PIO_NUM;
	}

	desc = dmaengine_prep_slave_sg(ssp->dmach,
				sgl, sg_len, ssp->slave_dirn, flags);
	if (desc) {
		desc->callback = mxs_mmc_dma_irq_callback;
		desc->callback_param = host;
	} else {
		if (data)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, ssp->dma_dir);
	}

	return desc;
}

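/*
 * Issue a broadcast command (MMC_CMD_BC): no response and no data, so only
 * the three PIO words (CTRL0/CMD0/CMD1) are queued, with the extra clock
 * cycles that APPEND_8CYC provides tacked on after the command.
 */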
static void mxs_mmc_bc(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct mmc_command *cmd = host->cmd;
	struct dma_async_tx_descriptor *desc;
	u32 ctrl0, cmd0, cmd1;

	ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

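/*
 * Issue an addressed command without data (MMC_CMD_AC/BCR): the response
 * type of the command decides whether CRC checking, a response, and the
 * long (136-bit) response format are enabled in CTRL0.
 */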
static void mxs_mmc_ac(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct mmc_command *cmd = host->cmd;
	struct dma_async_tx_descriptor *desc;
	u32 ignore_crc, get_resp, long_resp;
	u32 ctrl0, cmd0, cmd1;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
	cmd1 = cmd->arg;

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

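/*
 * Convert the data timeout into units of the TIMING_TIMEOUT field, which
 * (per ssp_timeout_mul below) counts in lumps of 4096 SSP clocks.  Both
 * the clock and the timeout are scaled down by 1000 first so the product
 * stays within 32 bits.  Illustrative numbers (assumed, not from hardware
 * docs): clock_rate = 96000000 and ns = 1000000 give
 * (1000000/1000) * (96000000/1000) / 4096 = 23437.
 */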
static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
{
	const unsigned int ssp_timeout_mul = 4096;
	/*
	 * Calculate ticks in ms since ns are large numbers
	 * and might overflow
	 */
	const unsigned int clock_per_ms = clock_rate / 1000;
	const unsigned int ms = ns / 1000;
	const unsigned int ticks = ms * clock_per_ms;
	const unsigned int ssp_ticks = ticks / ssp_timeout_mul;

	WARN_ON(ssp_ticks == 0);
	return ssp_ticks;
}

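/*
 * Issue an addressed data transfer command (MMC_CMD_ADTC).  Two DMA
 * descriptors are chained: a PIO descriptor that programs CTRL0/CMD0/CMD1
 * (transfer size, block size/count, read/write direction, response flags)
 * and a data descriptor for the scatterlist itself.  The data timeout is
 * reprogrammed per request from data->timeout_ns.
 */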
static void mxs_mmc_adtc(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = cmd->data;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl = data->sg, *sg;
	unsigned int sg_len = data->sg_len;
	unsigned int i;

	unsigned short dma_data_dir, timeout;
	enum dma_transfer_direction slave_dirn;
	unsigned int data_size = 0, log2_blksz;
	unsigned int blocks = data->blocks;

	struct mxs_ssp *ssp = &host->ssp;

	u32 ignore_crc, get_resp, long_resp, read;
	u32 ctrl0, cmd0, cmd1, val;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	if (data->flags & MMC_DATA_WRITE) {
		dma_data_dir = DMA_TO_DEVICE;
		slave_dirn = DMA_MEM_TO_DEV;
		read = 0;
	} else {
		dma_data_dir = DMA_FROM_DEVICE;
		slave_dirn = DMA_DEV_TO_MEM;
		read = BM_SSP_CTRL0_READ;
	}

	ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
		ignore_crc | get_resp | long_resp |
		BM_SSP_CTRL0_DATA_XFER | read |
		BM_SSP_CTRL0_WAIT_FOR_IRQ |
		BM_SSP_CTRL0_ENABLE;

	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);

	/* get logarithm to base 2 of block size for setting register */
	log2_blksz = ilog2(data->blksz);

	/*
	 * take special care of the case that data size from data->sg
	 * is not equal to blocks x blksz
	 */
	for_each_sg(sgl, sg, sg_len, i)
		data_size += sg->length;

	if (data_size != data->blocks * data->blksz)
		blocks = 1;

	/* xfer count, block size and count need to be set differently */
	if (ssp_is_old(ssp)) {
		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
	} else {
		writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
		writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
		       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
		       ssp->base + HW_SSP_BLOCK_SIZE);
	}

	if (cmd->opcode == SD_IO_RW_EXTENDED)
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	/* set the timeout count */
	timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
	val = readl(ssp->base + HW_SSP_TIMING(ssp));
	val &= ~(BM_SSP_TIMING_TIMEOUT);
	val |= BF_SSP(timeout, TIMING_TIMEOUT);
	writel(val, ssp->base + HW_SSP_TIMING(ssp));

	/* pio */
	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, 0);
	if (!desc)
		goto out;

	/* append data sg */
	WARN_ON(host->data != NULL);
	host->data = data;
	ssp->dma_dir = dma_data_dir;
	ssp->slave_dirn = slave_dirn;
	desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;
out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

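/* Dispatch a command to the handler matching its MMC command class. */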
static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
			      struct mmc_command *cmd)
{
	host->cmd = cmd;

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		mxs_mmc_bc(host);
		break;
	case MMC_CMD_BCR:
		mxs_mmc_ac(host);
		break;
	case MMC_CMD_AC:
		mxs_mmc_ac(host);
		break;
	case MMC_CMD_ADTC:
		mxs_mmc_adtc(host);
		break;
	default:
		dev_warn(mmc_dev(host->mmc),
			 "%s: unknown MMC command\n", __func__);
		break;
	}
}

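/*
 * ->request() entry point: if the request carries a set-block-count
 * command (CMD23 in mrq->sbc), issue that first; mxs_mmc_request_done()
 * then chains the actual data command.
 */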
static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);
	host->mrq = mrq;

	if (mrq->sbc)
		mxs_mmc_start_cmd(host, mrq->sbc);
	else
		mxs_mmc_start_cmd(host, mrq->cmd);
}

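/*
 * ->set_ios(): cache the bus-width encoding (0, 1 and 2 for the 1-, 4- and
 * 8-bit bus, as programmed into CTRL0_BUS_WIDTH by mxs_mmc_adtc()) and
 * retune the SSP clock.
 */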
static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);

	if (ios->bus_width == MMC_BUS_WIDTH_8)
		host->bus_width = 2;
	else if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->bus_width = 1;
	else
		host->bus_width = 0;

	if (ios->clock)
		mxs_ssp_set_clk_rate(&host->ssp, ios->clock);
}

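/*
 * Enable/disable SDIO card interrupt reporting using the STMP SET/CLR
 * register aliases, then re-check the status register in case an
 * interrupt was already pending while reporting was off.
 */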
static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	host->sdio_irq_en = enable;

	if (enable) {
		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET);
	} else {
		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
	}

	spin_unlock_irqrestore(&host->lock, flags);

	if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) &
			BM_SSP_STATUS_SDIO_IRQ)
		mmc_signal_sdio_irq(host->mmc);
}

static const struct mmc_host_ops mxs_mmc_ops = {
	.request = mxs_mmc_request,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mxs_mmc_get_cd,
	.set_ios = mxs_mmc_set_ios,
	.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
};

static const struct of_device_id mxs_mmc_dt_ids[] = {
	{ .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);

static void mxs_mmc_regulator_disable(void *regulator)
{
	regulator_disable(regulator);
}

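/*
 * Probe: map the SSP registers, optionally enable a vmmc supply, enable
 * the SSP clock, reset the block, request the "rx-tx" DMA channel, fill in
 * the mmc_host capabilities and limits (honouring DT via mmc_of_parse())
 * and register the host.
 */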
static int mxs_mmc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mxs_mmc_host *host;
	struct mmc_host *mmc;
	int ret = 0, irq_err;
	struct regulator *reg_vmmc;
	struct mxs_ssp *ssp;

	irq_err = platform_get_irq(pdev, 0);
	if (irq_err < 0)
		return irq_err;

	mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	ssp = &host->ssp;
	ssp->dev = &pdev->dev;
	ssp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ssp->base)) {
		ret = PTR_ERR(ssp->base);
		goto out_mmc_free;
	}

	ssp->devid = (enum mxs_ssp_id)of_device_get_match_data(&pdev->dev);

	host->mmc = mmc;
	host->sdio_irq_en = 0;

	reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
	if (!IS_ERR(reg_vmmc)) {
		ret = regulator_enable(reg_vmmc);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable vmmc regulator: %d\n", ret);
			goto out_mmc_free;
		}

		ret = devm_add_action_or_reset(&pdev->dev, mxs_mmc_regulator_disable,
					       reg_vmmc);
		if (ret)
			goto out_mmc_free;
	}

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(ssp->clk)) {
		ret = PTR_ERR(ssp->clk);
		goto out_mmc_free;
	}
	ret = clk_prepare_enable(ssp->clk);
	if (ret)
		goto out_mmc_free;

	ret = mxs_mmc_reset(host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
		goto out_clk_disable;
	}

	ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
	if (IS_ERR(ssp->dmach)) {
		dev_err(mmc_dev(host->mmc),
			"%s: failed to request dma\n", __func__);
		ret = PTR_ERR(ssp->dmach);
		goto out_clk_disable;
	}

	/* set mmc core parameters */
	mmc->ops = &mxs_mmc_ops;
	mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23;

	host->broken_cd = of_property_read_bool(np, "broken-cd");

	mmc->f_min = 400000;
	mmc->f_max = 288000000;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto out_free_dma;

	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->max_segs = 52;
	mmc->max_blk_size = 1 << 0xf;
	mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
	mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
	mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);

	platform_set_drvdata(pdev, mmc);

	spin_lock_init(&host->lock);

	ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
			       dev_name(&pdev->dev), host);
	if (ret)
		goto out_free_dma;

	ret = mmc_add_host(mmc);
	if (ret)
		goto out_free_dma;

	dev_info(mmc_dev(host->mmc), "initialized\n");

	return 0;

out_free_dma:
	dma_release_channel(ssp->dmach);
out_clk_disable:
	clk_disable_unprepare(ssp->clk);
out_mmc_free:
	mmc_free_host(mmc);
	return ret;
}

static int mxs_mmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	mmc_remove_host(mmc);

	if (ssp->dmach)
		dma_release_channel(ssp->dmach);

	clk_disable_unprepare(ssp->clk);

	mmc_free_host(mmc);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mxs_mmc_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	clk_disable_unprepare(ssp->clk);
	return 0;
}

static int mxs_mmc_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	return clk_prepare_enable(ssp->clk);
}
#endif

static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);

static struct platform_driver mxs_mmc_driver = {
	.probe		= mxs_mmc_probe,
	.remove		= mxs_mmc_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm	= &mxs_mmc_pm_ops,
		.of_match_table = mxs_mmc_dt_ids,
	},
};

module_platform_driver(mxs_mmc_driver);

MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
MODULE_AUTHOR("Freescale Semiconductor");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);