1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * drivers/mtd/nand/raw/pxa3xx_nand.c
4 *
5 * Copyright © 2005 Intel Corporation
6 * Copyright © 2006 Marvell International Ltd.
7 */
8
9 #include <common.h>
10 #include <malloc.h>
11 #include <fdtdec.h>
12 #include <nand.h>
13 #include <asm/global_data.h>
14 #include <dm/device_compat.h>
15 #include <dm/devres.h>
16 #include <linux/bitops.h>
17 #include <linux/bug.h>
18 #include <linux/delay.h>
19 #include <linux/err.h>
20 #include <linux/errno.h>
21 #include <asm/io.h>
22 #include <asm/arch/cpu.h>
23 #include <linux/mtd/mtd.h>
24 #include <linux/mtd/rawnand.h>
25 #include <linux/types.h>
26 #include <syscon.h>
27 #include <regmap.h>
28 #include <dm/uclass.h>
29 #include <dm/read.h>
30
31 #include "pxa3xx_nand.h"
32
33 DECLARE_GLOBAL_DATA_PTR;
34
/* Timeouts/delays used by the polling command path */
#define TIMEOUT_DRAIN_FIFO	5	/* in ms */
#define CHIP_DELAY_TIMEOUT	200
#define NAND_STOP_DELAY		40

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00)	/* Control register */
#define NDTR0CS0	(0x04)	/* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C)	/* Timing Parameter 1 for CS0 */
#define NDSR		(0x14)	/* Status Register */
#define NDPCR		(0x18)	/* Page Count Register */
#define NDBDR0		(0x1C)	/* Bad Block Register 0 */
#define NDBDR1		(0x20)	/* Bad Block Register 1 */
#define NDECCCTRL	(0x28)	/* ECC control */
#define NDDB		(0x40)	/* Data Buffer */
#define NDCB0		(0x48)	/* Command Buffer0 */
#define NDCB1		(0x4C)	/* Command Buffer1 */
#define NDCB2		(0x50)	/* Command Buffer2 */

/* NDCR (control register) bits */
#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
/* Low 12 NDCR bits mask interrupts: a set bit disables that interrupt */
#define NDCR_INT_MASK		(0xFFF)

/* NDSR (status register) bits */
#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

/* NDCB0 (command buffer 0) bits */
#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

/* NDCB0 extended command type encodings (NFCv2 chunked transfers) */
#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/* System control register and bit to enable NAND on some SoCs */
#define GENCONF_SOC_DEVICE_MUX	0x208
#define GENCONF_SOC_DEVICE_MUX_NFC_EN	BIT(0)

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7

/* macros for registers read/write */
#define nand_writel(info, off, val) \
	writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off) \
	readl((info)->mmio_base + (off))
142
143 /* error code and state */
/* Error codes latched into pxa3xx_nand_info.retcode by the IRQ handler */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error (NDSR_UNCORERR) */
	ERR_BBERR	= -4,	/* bad block detected */
	ERR_CORERR	= -5,	/* correctable ECC error (NDSR_CORERR) */
};
152
/* Command execution state machine, tracked in pxa3xx_nand_info.state */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};
165
/* Controller flavour, selected from the device-tree compatible string */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
	PXA3XX_NAND_VARIANT_ARMADA_8K,
};
171
/* Per-chip-select state: one instance per attached NAND chip */
struct pxa3xx_nand_host {
	struct nand_chip chip;
	void *info_data;	/* back-pointer to the owning pxa3xx_nand_info */

	int use_ecc;		/* use HW ECC for this chip? */
	int cs;			/* chip select this chip is wired to */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int col_addr_cycles;	/* column address cycles */
	unsigned int row_addr_cycles;	/* row address cycles */
};
184
/* Controller-wide driver state, shared by all chip selects */
struct pxa3xx_nand_info {
	struct nand_hw_control controller;
	struct pxa3xx_nand_platform_data *pdata;

	struct clk *clk;
	void __iomem *mmio_base;	/* controller register base */
	unsigned long mmio_phys;
	/* completion flags set by the IRQ handler, polled by nand_cmdfunc() */
	int cmd_complete, dev_ready;

	unsigned int buf_start;		/* read offset into data_buff */
	unsigned int buf_count;		/* valid bytes in data_buff */
	unsigned int buf_size;
	unsigned int data_buff_pos;	/* PIO progress within data_buff */
	unsigned int oob_buff_pos;	/* PIO progress within oob_buff */

	unsigned char *data_buff;
	unsigned char *oob_buff;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int cs;
	int use_ecc;	/* use HW ECC ? */
	int force_raw;	/* prevent use_ecc to be set */
	int ecc_bch;	/* using BCH ECC? */
	int use_spare;	/* use spare ? */
	int need_wait;

	/* Amount of real data per full chunk */
	unsigned int chunk_size;

	/* Amount of spare data per full chunk */
	unsigned int spare_size;

	/* Number of full chunks (i.e chunk_size + spare_size) */
	unsigned int nfullchunks;

	/*
	 * Total number of chunks. If equal to nfullchunks, then there
	 * are only full chunks. Otherwise, there is one last chunk of
	 * size (last_chunk_size + last_spare_size)
	 */
	unsigned int ntotalchunks;

	/* Amount of real data in the last chunk */
	unsigned int last_chunk_size;

	/* Amount of spare data in the last chunk */
	unsigned int last_spare_size;

	unsigned int ecc_size;
	unsigned int ecc_err_cnt;
	unsigned int max_bitflips;
	int retcode;	/* one of ERR_* above */

	/*
	 * Variables only valid during command
	 * execution. step_chunk_size and step_spare_size is the
	 * amount of real data and spare data in the current
	 * chunk. cur_chunk is the current chunk being
	 * read/programmed.
	 */
	unsigned int step_chunk_size;
	unsigned int step_spare_size;
	unsigned int cur_chunk;

	/* cached register value */
	uint32_t reg_ndcr;
	uint32_t ndtr0cs0;
	uint32_t ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t ndcb0;
	uint32_t ndcb1;
	uint32_t ndcb2;
	uint32_t ndcb3;
};
268
/* Built-in timing sets (ns), referenced by builtin_flash_types[] below */
static struct pxa3xx_nand_timing timing[] = {
	/*
	 * tCH	Enable signal hold time
	 * tCS	Enable signal setup time
	 * tWH	ND_nWE high duration
	 * tWP	ND_nWE pulse time
	 * tRH	ND_nRE high duration
	 * tRP	ND_nRE pulse width
	 * tR	ND_nWE high to ND_nRE low for read
	 * tWHR	ND_nWE high to ND_nRE low for status read
	 * tAR	ND_ALE low to ND_nRE low delay
	 */
	/*ch  cs  wh  wp   rh  rp   r      whr  ar */
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
	{  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
};
288
/* ID-based fallback table for chips without usable ONFI timing data */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
	/*
	 * chip_id
	 * flash_width	Width of Flash memory (DWIDTH_M)
	 * dfc_width	Width of flash controller(DWIDTH_C)
	 * *timing
	 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
	 */
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
	{ 0xda98,  8,  8, &timing[4] },
};
307
#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
/* On-flash bad block table signatures ("MVBbt0" / mirrored "1tbBVM") */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
#endif
332
/* OOB layouts for the BCH ECC engine, per page size / strength */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		64, 65, 66, 67, 68, 69, 70, 71,
		72, 73, 74, 75, 76, 77, 78, 79,
		80, 81, 82, 83, 84, 85, 86, 87,
		88, 89, 90, 91, 92, 93, 94, 95},
	.oobfree = { {1, 4}, {6, 26} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,

		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127,

		160, 161, 162, 163, 164, 165, 166, 167,
		168, 169, 170, 171, 172, 173, 174, 175,
		176, 177, 178, 179, 180, 181, 182, 183,
		184, 185, 186, 187, 188, 189, 190, 191,

		224, 225, 226, 227, 228, 229, 230, 231,
		232, 233, 234, 235, 236, 237, 238, 239,
		240, 241, 242, 243, 244, 245, 246, 247,
		248, 249, 250, 251, 252, 253, 254, 255},

	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63},
	.oobfree = { }
};

static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
	.eccbytes = 256,
	.eccpos = {},
	/* HW ECC handles all ECC data and all spare area is free for OOB */
	.oobfree = {{0, 160} }
};
415
/* Field packers for the NDTR0/NDTR1 timing registers; values are clamped */
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/*
 * convert nano-seconds to nand flash controller clock cycles
 * NOTE(review): integer division truncates both clk/1000000 and the final
 * result, so the cycle count can round down slightly — confirm this matches
 * the upstream driver's intent before "fixing".
 */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
429
/* DT match table; .data selects the controller variant (NFCv2 flavours) */
static const struct udevice_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,mvebu-pxa3xx-nand",
		.data = PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{
		.compatible = "marvell,armada-8k-nand-controller",
		.data = PXA3XX_NAND_VARIANT_ARMADA_8K,
	},
	{}
};
441
pxa3xx_nand_get_variant(struct udevice * dev)442 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(struct udevice *dev)
443 {
444 return dev_get_driver_data(dev);
445 }
446
pxa3xx_nand_set_timing(struct pxa3xx_nand_host * host,const struct pxa3xx_nand_timing * t)447 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
448 const struct pxa3xx_nand_timing *t)
449 {
450 struct pxa3xx_nand_info *info = host->info_data;
451 unsigned long nand_clk = mvebu_get_nand_clock();
452 uint32_t ndtr0, ndtr1;
453
454 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
455 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
456 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
457 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
458 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
459 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
460
461 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
462 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
463 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
464
465 info->ndtr0cs0 = ndtr0;
466 info->ndtr1cs0 = ndtr1;
467 nand_writel(info, NDTR0CS0, ndtr0);
468 nand_writel(info, NDTR1CS0, ndtr1);
469 }
470
pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host * host,const struct nand_sdr_timings * t)471 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
472 const struct nand_sdr_timings *t)
473 {
474 struct pxa3xx_nand_info *info = host->info_data;
475 struct nand_chip *chip = &host->chip;
476 unsigned long nand_clk = mvebu_get_nand_clock();
477 uint32_t ndtr0, ndtr1;
478
479 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
480 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
481 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
482 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
483 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
484 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
485 u32 tR = chip->chip_delay * 1000;
486 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
487 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
488
489 /* fallback to a default value if tR = 0 */
490 if (!tR)
491 tR = 20000;
492
493 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
494 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
495 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
496 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
497 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
498 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
499
500 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
501 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
502 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
503
504 info->ndtr0cs0 = ndtr0;
505 info->ndtr1cs0 = ndtr1;
506 nand_writel(info, NDTR0CS0, ndtr0);
507 nand_writel(info, NDTR1CS0, ndtr1);
508 }
509
pxa3xx_nand_init_timings(struct pxa3xx_nand_host * host)510 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
511 {
512 const struct nand_sdr_timings *timings;
513 struct nand_chip *chip = &host->chip;
514 struct pxa3xx_nand_info *info = host->info_data;
515 const struct pxa3xx_nand_flash *f = NULL;
516 struct mtd_info *mtd = nand_to_mtd(&host->chip);
517 int mode, id, ntypes, i;
518
519 mode = onfi_get_async_timing_mode(chip);
520 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
521 ntypes = ARRAY_SIZE(builtin_flash_types);
522
523 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
524
525 id = chip->read_byte(mtd);
526 id |= chip->read_byte(mtd) << 0x8;
527
528 for (i = 0; i < ntypes; i++) {
529 f = &builtin_flash_types[i];
530
531 if (f->chip_id == id)
532 break;
533 }
534
535 if (i == ntypes) {
536 dev_err(mtd->dev, "Error: timings not found\n");
537 return -EINVAL;
538 }
539
540 pxa3xx_nand_set_timing(host, f->timing);
541
542 if (f->flash_width == 16) {
543 info->reg_ndcr |= NDCR_DWIDTH_M;
544 chip->options |= NAND_BUSWIDTH_16;
545 }
546
547 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
548 } else {
549 mode = fls(mode) - 1;
550 if (mode < 0)
551 mode = 0;
552
553 timings = onfi_async_timing_mode_to_sdr_timings(mode);
554 if (IS_ERR(timings))
555 return PTR_ERR(timings);
556
557 pxa3xx_nand_set_sdr_timing(host, timings);
558 }
559
560 return 0;
561 }
562
563 /**
564 * NOTE: it is a must to set ND_RUN first, then write
565 * command buffer, otherwise, it does not work.
566 * We enable all the interrupt at the same time, and
567 * let pxa3xx_nand_irq to handle all logic.
568 */
pxa3xx_nand_start(struct pxa3xx_nand_info * info)569 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
570 {
571 uint32_t ndcr;
572
573 ndcr = info->reg_ndcr;
574
575 if (info->use_ecc) {
576 ndcr |= NDCR_ECC_EN;
577 if (info->ecc_bch)
578 nand_writel(info, NDECCCTRL, 0x1);
579 } else {
580 ndcr &= ~NDCR_ECC_EN;
581 if (info->ecc_bch)
582 nand_writel(info, NDECCCTRL, 0x0);
583 }
584
585 ndcr &= ~NDCR_DMA_EN;
586
587 if (info->use_spare)
588 ndcr |= NDCR_SPARE_EN;
589 else
590 ndcr &= ~NDCR_SPARE_EN;
591
592 ndcr |= NDCR_ND_RUN;
593
594 /* clear status bits and run */
595 nand_writel(info, NDSR, NDSR_MASK);
596 nand_writel(info, NDCR, 0);
597 nand_writel(info, NDCR, ndcr);
598 }
599
/* Mask (disable) the given interrupt sources: NDCR low bits disable when set */
static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	nand_writel(info, NDCR, nand_readl(info, NDCR) | int_mask);
}
607
drain_fifo(struct pxa3xx_nand_info * info,void * data,int len)608 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
609 {
610 if (info->ecc_bch && !info->force_raw) {
611 u32 ts;
612
613 /*
614 * According to the datasheet, when reading from NDDB
615 * with BCH enabled, after each 32 bytes reads, we
616 * have to make sure that the NDSR.RDDREQ bit is set.
617 *
618 * Drain the FIFO 8 32 bits reads at a time, and skip
619 * the polling on the last read.
620 */
621 while (len > 8) {
622 readsl(info->mmio_base + NDDB, data, 8);
623
624 ts = get_timer(0);
625 while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
626 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
627 dev_err(info->controller.active->mtd.dev,
628 "Timeout on RDDREQ while draining the FIFO\n");
629 return;
630 }
631 }
632
633 data += 32;
634 len -= 8;
635 }
636 }
637
638 readsl(info->mmio_base + NDDB, data, len);
639 }
640
handle_data_pio(struct pxa3xx_nand_info * info)641 static void handle_data_pio(struct pxa3xx_nand_info *info)
642 {
643 int data_len = info->step_chunk_size;
644
645 /*
646 * In raw mode, include the spare area and the ECC bytes that are not
647 * consumed by the controller in the data section. Do not reorganize
648 * here, do it in the ->read_page_raw() handler instead.
649 */
650 if (info->force_raw)
651 data_len += info->step_spare_size + info->ecc_size;
652
653 switch (info->state) {
654 case STATE_PIO_WRITING:
655 if (info->step_chunk_size)
656 writesl(info->mmio_base + NDDB,
657 info->data_buff + info->data_buff_pos,
658 DIV_ROUND_UP(data_len, 4));
659
660 if (info->step_spare_size)
661 writesl(info->mmio_base + NDDB,
662 info->oob_buff + info->oob_buff_pos,
663 DIV_ROUND_UP(info->step_spare_size, 4));
664 break;
665 case STATE_PIO_READING:
666 if (data_len)
667 drain_fifo(info,
668 info->data_buff + info->data_buff_pos,
669 DIV_ROUND_UP(data_len, 4));
670
671 if (info->force_raw)
672 break;
673
674 if (info->step_spare_size)
675 drain_fifo(info,
676 info->oob_buff + info->oob_buff_pos,
677 DIV_ROUND_UP(info->step_spare_size, 4));
678 break;
679 default:
680 dev_err(info->controller.active->mtd.dev,
681 "%s: invalid state %d\n", __func__, info->state);
682 BUG();
683 }
684
685 /* Update buffer pointers for multi-page read/write */
686 info->data_buff_pos += data_len;
687 info->oob_buff_pos += info->step_spare_size;
688 }
689
pxa3xx_nand_irq_thread(struct pxa3xx_nand_info * info)690 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
691 {
692 handle_data_pio(info);
693
694 info->state = STATE_CMD_DONE;
695 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
696 }
697
/*
 * Interrupt handler, called synchronously from the nand_cmdfunc() polling
 * loop in U-Boot. Decodes NDSR, records ECC results, performs PIO data
 * transfers, loads the command buffers on WRCMDREQ, and sets the
 * cmd_complete/dev_ready flags the polling loop waits on.
 */
static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* Ready/command-done bits differ per chip select */
	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	/* TODO - find out why we need the delay during write operation. */
	ndelay(1);

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* Only the NFCv2 BCH engine reports a bitflip count in NDSR */
		if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
		     info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);
		return 0;
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bit before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
		    info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* Publish completion/ready only after the status bits were cleared */
	if (is_completed)
		info->cmd_complete = 1;
	if (is_ready)
		info->dev_ready = 1;

	return ret;
}
789
/* Return 1 if the first @len bytes of @buf are all 0xff (erased), else 0 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}

	return 1;
}
797
/*
 * Encode column and page address into NDCB1/NDCB2 for the prepared command.
 * Small pages pack 1 column byte + 3 row bytes into NDCB1; large pages use
 * 2 column bytes in NDCB1 and overflow the row address into NDCB2.
 */
static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	if (page_size < info->chunk_size) {
		/* small page addr setting */
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) | (column & 0xFF);
		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16) | (column & 0xFFFF);
		info->ndcb2 = (page_addr & 0xFF0000) ?
				(page_addr & 0xFF0000) >> 16 : 0;
	}
}
817
prepare_start_command(struct pxa3xx_nand_info * info,int command)818 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
819 {
820 struct pxa3xx_nand_host *host = info->host[info->cs];
821 struct mtd_info *mtd = nand_to_mtd(&host->chip);
822
823 /* reset data and oob column point to handle data */
824 info->buf_start = 0;
825 info->buf_count = 0;
826 info->data_buff_pos = 0;
827 info->oob_buff_pos = 0;
828 info->step_chunk_size = 0;
829 info->step_spare_size = 0;
830 info->cur_chunk = 0;
831 info->use_ecc = 0;
832 info->use_spare = 1;
833 info->retcode = ERR_NONE;
834 info->ecc_err_cnt = 0;
835 info->ndcb3 = 0;
836 info->need_wait = 0;
837
838 switch (command) {
839 case NAND_CMD_READ0:
840 case NAND_CMD_READOOB:
841 case NAND_CMD_PAGEPROG:
842 if (!info->force_raw)
843 info->use_ecc = 1;
844 break;
845 case NAND_CMD_PARAM:
846 info->use_spare = 0;
847 break;
848 default:
849 info->ndcb1 = 0;
850 info->ndcb2 = 0;
851 break;
852 }
853
854 /*
855 * If we are about to issue a read command, or about to set
856 * the write address, then clean the data buffer.
857 */
858 if (command == NAND_CMD_READ0 ||
859 command == NAND_CMD_READOOB ||
860 command == NAND_CMD_SEQIN) {
861 info->buf_count = mtd->writesize + mtd->oobsize;
862 memset(info->data_buff, 0xFF, info->buf_count);
863 }
864 }
865
/*
 * Build the NDCB0..NDCB3 command-buffer values for @command, including
 * address cycles and per-chunk transfer lengths.
 *
 * Returns 1 if the command must actually be started on the controller,
 * 0 if nothing is to be executed (e.g. SEQIN on small pages, blank
 * PAGEPROG, ERASE2, or an unsupported command).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	/* Route the command to the selected chip select */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/* SEQIN alone only records the address; PAGEPROG triggers the write */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB is read as the tail of the full page buffer */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (info->force_raw) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
				       NDCB0_LEN_OVRD |
				       NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			/* raw reads also fetch the unconsumed ECC bytes */
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size + info->ecc_size;
		} else if (mtd->writesize == info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* Skip programming a page that is entirely erased */
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > info->chunk_size) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* small page: single SEQIN+PAGEPROG double-byte cmd */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* ERASE1/ERASE2 issued as one double-byte command */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* already folded into ERASE1's double-byte command above */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(mtd->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}
1060
/*
 * ->cmdfunc() hook for chips whose page fits in a single controller chunk.
 * Builds the command, starts the controller, then polls NDSR and dispatches
 * pxa3xx_nand_irq() until the command completes or CHIP_DELAY_TIMEOUT (ms)
 * expires.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device ,then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		u32 ts;

		info->cmd_complete = 0;
		info->dev_ready = 0;
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		/* Poll-driven IRQ dispatch until cmd_complete or timeout */
		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(mtd->dev, "Wait timeout!!!\n");
				return;
			}
		}
	}
	info->state = STATE_IDLE;
}
1120
/*
 * Extended ->cmdfunc() hook, installed on Armada 370/8K variants when
 * the page is larger than the controller FIFO: the page is transferred
 * as a sequence of "naked" chunk commands (monolithic / naked / last /
 * dispatch), looping until all info->ntotalchunks chunks are done.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	info->dev_ready = 0;

	do {
		u32 ts;

		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			info->dev_ready = 1;
			break;
		}

		info->cmd_complete = 0;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			/* Poll NDSR; events advance the driver state machine */
			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(mtd->dev, "Wait timeout!!!\n");
				/*
				 * NOTE(review): returns with info->state left
				 * as STATE_PREPARED — confirm this is safe
				 * after a timeout.
				 */
				return;
			}
		}

		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0 &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

		/* Check if the sequence is complete */
		if (info->cur_chunk == info->ntotalchunks &&
		    command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->cur_chunk == info->ntotalchunks - 1)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->cur_chunk == info->ntotalchunks) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1252
pxa3xx_nand_write_page_hwecc(struct mtd_info * mtd,struct nand_chip * chip,const uint8_t * buf,int oob_required,int page)1253 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1254 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1255 int page)
1256 {
1257 chip->write_buf(mtd, buf, mtd->writesize);
1258 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1259
1260 return 0;
1261 }
1262
/*
 * ->read_page() for the HW ECC engine. Data and OOB are copied from the
 * controller buffer; the controller's ECC verdict (info->retcode, set by
 * the event handler) is then folded into the MTD ECC statistics.
 *
 * Returns the maximum number of bitflips seen, as the MTD core expects.
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int bf;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		/* Hardware corrected some bitflips; just account for them */
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
		/*
		 * Empty pages will trigger uncorrectable errors. Re-read the
		 * entire page in raw mode and check for bits not being "1".
		 * If there are more than the supported strength, then it means
		 * this is an actual uncorrectable error.
		 */
		chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
		bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 NULL, 0, chip->ecc.strength);
		if (bf < 0) {
			/* Genuinely uncorrectable page */
			mtd->ecc_stats.failed++;
		} else if (bf) {
			/* Erased page with a few bitflips: count as corrected */
			mtd->ecc_stats.corrected += bf;
			info->max_bitflips = max_t(unsigned int,
						   info->max_bitflips, bf);
			info->retcode = ERR_CORERR;
		} else {
			info->retcode = ERR_NONE;
		}

	} else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
		/* Raw read is not supported with Hamming ECC engine */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
1309
/*
 * ->read_page_raw() for the BCH engine: read a full page with the ECC
 * engine disabled, laying the bytes out the way
 * nand_check_erased_ecc_chunk() expects — all data chunks in buf, all
 * spare areas first in oob_poi, then all ECC bytes behind them.
 *
 * Returns 0 on success, -ENOTSUPP when the Hamming engine is in use
 * (raw reads are only implemented for BCH).
 */
static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, uint8_t *buf,
				     int oob_required, int page)
{
	/* Use nand_get_controller_data() like the rest of the driver
	 * (was a bare chip->priv access). */
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int chunk, ecc_off_buf;

	if (!info->ecc_bch)
		return -ENOTSUPP;

	/*
	 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
	 * pxa3xx_nand_start(), which will actually disable the ECC engine.
	 */
	info->force_raw = true;
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);

	/* ECC bytes start in oob_poi right after all the spare areas */
	ecc_off_buf = (info->nfullchunks * info->spare_size) +
		      info->last_spare_size;
	for (chunk = 0; chunk < info->nfullchunks; chunk++) {
		chip->read_buf(mtd,
			       buf + (chunk * info->chunk_size),
			       info->chunk_size);
		chip->read_buf(mtd,
			       chip->oob_poi +
			       (chunk * (info->spare_size)),
			       info->spare_size);
		chip->read_buf(mtd,
			       chip->oob_poi + ecc_off_buf +
			       (chunk * (info->ecc_size)),
			       info->ecc_size - 2);
	}

	/* Handle the (possibly differently sized) trailing chunk, if any */
	if (info->ntotalchunks > info->nfullchunks) {
		chip->read_buf(mtd,
			       buf + (info->nfullchunks * info->chunk_size),
			       info->last_chunk_size);
		chip->read_buf(mtd,
			       chip->oob_poi +
			       (info->nfullchunks * (info->spare_size)),
			       info->last_spare_size);
		chip->read_buf(mtd,
			       chip->oob_poi + ecc_off_buf +
			       (info->nfullchunks * (info->ecc_size)),
			       info->ecc_size - 2);
	}

	info->force_raw = false;

	return 0;
}
1362
pxa3xx_nand_read_oob_raw(struct mtd_info * mtd,struct nand_chip * chip,int page)1363 static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1364 struct nand_chip *chip, int page)
1365 {
1366 /* Invalidate page cache */
1367 chip->pagebuf = -1;
1368
1369 return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
1370 page);
1371 }
1372
pxa3xx_nand_read_byte(struct mtd_info * mtd)1373 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1374 {
1375 struct nand_chip *chip = mtd_to_nand(mtd);
1376 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1377 struct pxa3xx_nand_info *info = host->info_data;
1378 char retval = 0xFF;
1379
1380 if (info->buf_start < info->buf_count)
1381 /* Has just send a new command? */
1382 retval = info->data_buff[info->buf_start++];
1383
1384 return retval;
1385 }
1386
pxa3xx_nand_read_word(struct mtd_info * mtd)1387 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1388 {
1389 struct nand_chip *chip = mtd_to_nand(mtd);
1390 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1391 struct pxa3xx_nand_info *info = host->info_data;
1392 u16 retval = 0xFFFF;
1393
1394 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1395 retval = *((u16 *)(info->data_buff+info->buf_start));
1396 info->buf_start += 2;
1397 }
1398 return retval;
1399 }
1400
/*
 * ->read_buf(): copy up to len bytes out of the driver's data buffer,
 * clamped to what is left, and advance the read position.
 */
static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	uint8_t *src = info->data_buff + info->buf_start;
	int avail = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, src, avail);
	info->buf_start += avail;
}
1411
pxa3xx_nand_write_buf(struct mtd_info * mtd,const uint8_t * buf,int len)1412 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1413 const uint8_t *buf, int len)
1414 {
1415 struct nand_chip *chip = mtd_to_nand(mtd);
1416 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1417 struct pxa3xx_nand_info *info = host->info_data;
1418 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1419
1420 memcpy(info->data_buff + info->buf_start, buf, real_len);
1421 info->buf_start += real_len;
1422 }
1423
/* Chip select is applied per command via host->cs; nothing to do here. */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1428
pxa3xx_nand_waitfunc(struct mtd_info * mtd,struct nand_chip * this)1429 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1430 {
1431 struct nand_chip *chip = mtd_to_nand(mtd);
1432 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1433 struct pxa3xx_nand_info *info = host->info_data;
1434
1435 if (info->need_wait) {
1436 u32 ts;
1437
1438 info->need_wait = 0;
1439
1440 ts = get_timer(0);
1441 while (1) {
1442 u32 status;
1443
1444 status = nand_readl(info, NDSR);
1445 if (status)
1446 pxa3xx_nand_irq(info);
1447
1448 if (info->dev_ready)
1449 break;
1450
1451 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1452 dev_err(mtd->dev, "Ready timeout!!!\n");
1453 return NAND_STATUS_FAIL;
1454 }
1455 }
1456 }
1457
1458 /* pxa3xx_nand_send_command has waited for command complete */
1459 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1460 if (info->retcode == ERR_NONE)
1461 return 0;
1462 else
1463 return NAND_STATUS_FAIL;
1464 }
1465
1466 return NAND_STATUS_READY;
1467 }
1468
pxa3xx_nand_config_ident(struct pxa3xx_nand_info * info)1469 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1470 {
1471 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1472
1473 /* Configure default flash values */
1474 info->reg_ndcr = 0x0; /* enable all interrupts */
1475 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1476 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1477 info->reg_ndcr |= NDCR_SPARE_EN;
1478
1479 return 0;
1480 }
1481
pxa3xx_nand_config_tail(struct pxa3xx_nand_info * info)1482 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1483 {
1484 struct pxa3xx_nand_host *host = info->host[info->cs];
1485 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1486 struct nand_chip *chip = mtd_to_nand(mtd);
1487
1488 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1489 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1490 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1491 }
1492
pxa3xx_nand_detect_config(struct pxa3xx_nand_info * info)1493 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1494 {
1495 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1496 uint32_t ndcr = nand_readl(info, NDCR);
1497
1498 /* Set an initial chunk size */
1499 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1500 info->reg_ndcr = ndcr &
1501 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1502 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1503 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1504 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1505 }
1506
pxa3xx_nand_init_buff(struct pxa3xx_nand_info * info)1507 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1508 {
1509 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1510 if (info->data_buff == NULL)
1511 return -ENOMEM;
1512 return 0;
1513 }
1514
pxa3xx_nand_sensing(struct pxa3xx_nand_host * host)1515 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1516 {
1517 struct pxa3xx_nand_info *info = host->info_data;
1518 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1519 struct mtd_info *mtd;
1520 struct nand_chip *chip;
1521 const struct nand_sdr_timings *timings;
1522 int ret;
1523
1524 mtd = nand_to_mtd(&info->host[info->cs]->chip);
1525 chip = mtd_to_nand(mtd);
1526
1527 /* configure default flash values */
1528 info->reg_ndcr = 0x0; /* enable all interrupts */
1529 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1530 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1531 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1532
1533 /* use the common timing to make a try */
1534 timings = onfi_async_timing_mode_to_sdr_timings(0);
1535 if (IS_ERR(timings))
1536 return PTR_ERR(timings);
1537
1538 pxa3xx_nand_set_sdr_timing(host, timings);
1539
1540 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1541 ret = chip->waitfunc(mtd, chip);
1542 if (ret & NAND_STATUS_FAIL)
1543 return -ENODEV;
1544
1545 return 0;
1546 }
1547
/*
 * Select the controller chunk/spare/ECC geometry for the requested ECC
 * strength, step size and page size.
 *
 * The BCH engine works in 16-bit-per-chunk mode, so 4-bit and 8-bit
 * per-512-byte requirements are satisfied by over-provisioning
 * (ecc->strength = 16). nfullchunks/ntotalchunks describe how the page
 * is split; a trailing partial chunk uses last_chunk_size /
 * last_spare_size.
 *
 * Returns 0 on success, -ENODEV for unsupported combinations.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	/* 1-bit Hamming, 2 KiB pages */
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/* 1-bit Hamming, 512-byte pages */
	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 4;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 2;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 1024;
		info->last_spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch8bit;
		ecc->strength = 16;

	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;

	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
		info->ecc_bch = 1;
		info->nfullchunks = 8;
		info->ntotalchunks = 9;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 160;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch8bit;
		ecc->strength = 16;

	} else {
		dev_err(info->controller.active->mtd.dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	return 0;
}
1667
/*
 * Identify the flash on the current chip select and configure the
 * controller accordingly: NDCR/timings, ECC geometry, command handling
 * variant, addressing cycles and the data/OOB buffer.
 *
 * Returns 0 on success or a negative error code.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	int ret;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config) {
		/* Inherit NDCR and timings from the previous boot stage */
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
		ret = pxa3xx_nand_sensing(host);
		if (ret) {
			dev_info(mtd->dev, "There is no chip on cs %d!\n",
				 info->cs);
			return ret;
		}
	}

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
	    info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init_timings(host);
		if (ret) {
			dev_err(mtd->dev,
				"Failed to set timings: %d\n", ret);
			return ret;
		}
	}

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * We'll use a bad block table stored in-flash and don't
	 * allow writing the bad block marker to the flash.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;
#endif

	/* DT-provided ECC parameters win over the chip's advertised ones */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling,
	 */
	if (mtd->writesize > info->chunk_size) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
		    info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(mtd->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* More than 65536 pages needs a third row-address cycle */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}
1779
alloc_nand_resource(struct udevice * dev,struct pxa3xx_nand_info * info)1780 static int alloc_nand_resource(struct udevice *dev, struct pxa3xx_nand_info *info)
1781 {
1782 struct pxa3xx_nand_platform_data *pdata;
1783 struct pxa3xx_nand_host *host;
1784 struct nand_chip *chip = NULL;
1785 struct mtd_info *mtd;
1786 int cs;
1787
1788 pdata = info->pdata;
1789 if (pdata->num_cs <= 0)
1790 return -ENODEV;
1791
1792 info->variant = pxa3xx_nand_get_variant(dev);
1793 for (cs = 0; cs < pdata->num_cs; cs++) {
1794 chip = (struct nand_chip *)
1795 ((u8 *)&info[1] + sizeof(*host) * cs);
1796 mtd = nand_to_mtd(chip);
1797 host = (struct pxa3xx_nand_host *)chip;
1798 info->host[cs] = host;
1799 host->cs = cs;
1800 host->info_data = info;
1801 mtd->owner = THIS_MODULE;
1802
1803 nand_set_controller_data(chip, host);
1804 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1805 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1806 chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
1807 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1808 chip->controller = &info->controller;
1809 chip->waitfunc = pxa3xx_nand_waitfunc;
1810 chip->select_chip = pxa3xx_nand_select_chip;
1811 chip->read_word = pxa3xx_nand_read_word;
1812 chip->read_byte = pxa3xx_nand_read_byte;
1813 chip->read_buf = pxa3xx_nand_read_buf;
1814 chip->write_buf = pxa3xx_nand_write_buf;
1815 chip->options |= NAND_NO_SUBPAGE_WRITE;
1816 chip->cmdfunc = nand_cmdfunc;
1817 }
1818
1819 /* Allocate a buffer to allow flash detection */
1820 info->buf_size = INIT_BUFFER_SIZE;
1821 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1822 if (info->data_buff == NULL)
1823 return -ENOMEM;
1824
1825 /* initialize all interrupts to be disabled */
1826 disable_int(info, NDSR_MASK);
1827
1828 /*
1829 * Some SoCs like A7k/A8k need to enable manually the NAND
1830 * controller to avoid being bootloader dependent. This is done
1831 * through the use of a single bit in the System Functions registers.
1832 */
1833 if (pxa3xx_nand_get_variant(dev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
1834 struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
1835 dev, "marvell,system-controller");
1836 u32 reg;
1837
1838 if (IS_ERR(sysctrl_base))
1839 return PTR_ERR(sysctrl_base);
1840
1841 regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, ®);
1842 reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
1843 regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
1844 }
1845
1846 return 0;
1847 }
1848
pxa3xx_nand_probe_dt(struct udevice * dev,struct pxa3xx_nand_info * info)1849 static int pxa3xx_nand_probe_dt(struct udevice *dev, struct pxa3xx_nand_info *info)
1850 {
1851 struct pxa3xx_nand_platform_data *pdata;
1852
1853 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1854 if (!pdata)
1855 return -ENOMEM;
1856
1857 info->mmio_base = dev_read_addr_ptr(dev);
1858
1859 pdata->num_cs = dev_read_u32_default(dev, "num-cs", 1);
1860 if (pdata->num_cs != 1) {
1861 pr_err("pxa3xx driver supports single CS only\n");
1862 return -EINVAL;
1863 }
1864
1865 if (dev_read_bool(dev, "nand-enable-arbiter"))
1866 pdata->enable_arbiter = 1;
1867
1868 if (dev_read_bool(dev, "nand-keep-config"))
1869 pdata->keep_config = 1;
1870
1871 /*
1872 * ECC parameters.
1873 * If these are not set, they will be selected according
1874 * to the detected flash type.
1875 */
1876 /* ECC strength */
1877 pdata->ecc_strength = dev_read_u32_default(dev, "nand-ecc-strength", 0);
1878
1879 /* ECC step size */
1880 pdata->ecc_step_size = dev_read_u32_default(dev, "nand-ecc-step-size",
1881 0);
1882
1883 info->pdata = pdata;
1884
1885 return 0;
1886 }
1887
/*
 * Driver-model probe: parse the DT, allocate controller resources and
 * scan/register a NAND device on each chip select. Succeeds if at
 * least one chip select probes successfully.
 */
static int pxa3xx_nand_probe(struct udevice *dev)
{
	struct pxa3xx_nand_platform_data *pdata;
	int ret, cs, probe_success;
	struct pxa3xx_nand_info *info = dev_get_priv(dev);

	ret = pxa3xx_nand_probe_dt(dev, info);
	if (ret)
		return ret;

	pdata = info->pdata;

	ret = alloc_nand_resource(dev, info);
	if (ret) {
		dev_err(dev, "alloc nand resource failed\n");
		return ret;
	}

	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);

		/*
		 * The mtd name matches the one used in 'mtdparts' kernel
		 * parameter. This name cannot be changed or otherwise
		 * user's mtd partitions configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			/* Keep going: another CS may still have a chip */
			dev_info(mtd->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;
		}

		if (nand_register(cs, mtd))
			continue;

		probe_success = 1;
	}

	if (!probe_success)
		return -ENODEV;

	return 0;
}
1935
/*
 * Driver-model binding. The private data holds the controller info
 * followed by one pxa3xx_nand_host per possible chip select (see the
 * layout assumed by alloc_nand_resource()).
 */
U_BOOT_DRIVER(pxa3xx_nand) = {
	.name = "pxa3xx-nand",
	.id = UCLASS_MTD,
	.of_match = pxa3xx_nand_dt_ids,
	.probe = pxa3xx_nand_probe,
	.priv_auto	= sizeof(struct pxa3xx_nand_info) +
		sizeof(struct pxa3xx_nand_host) * CONFIG_SYS_MAX_NAND_DEVICE,
};
1944
board_nand_init(void)1945 void board_nand_init(void)
1946 {
1947 struct udevice *dev;
1948 int ret;
1949
1950 ret = uclass_get_device_by_driver(UCLASS_MTD,
1951 DM_DRIVER_GET(pxa3xx_nand), &dev);
1952 if (ret && ret != -ENODEV) {
1953 pr_err("Failed to initialize %s. (error %d)\n", dev->name,
1954 ret);
1955 }
1956 }
1957