/*
 * Copyright (c) 2019-2020, STMicroelectronics - All Rights Reserved
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>

#include <platform_def.h>

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/raw_nand.h>
#include <lib/utils.h>

#define ONFI_SIGNATURE_ADDR	0x20U

/* CRC calculation */
#define CRC_POLYNOM		0x8005U
#define CRC_INIT_VALUE		0x4F4EU

/* Status register */
#define NAND_STATUS_READY	BIT(6)

#define SZ_128M			0x08000000U
#define SZ_512			0x200U

static struct rawnand_device rawnand_dev;

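/*
 * Weak default: platforms may override this hook to provide or adjust the
 * NAND device description. The default implementation does nothing.
 */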
#pragma weak plat_get_raw_nand_data
int plat_get_raw_nand_data(struct rawnand_device *device)
{
	return 0;
}

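/* Send a single command cycle to the NAND device, applying the given instruction delay. */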
static int nand_send_cmd(uint8_t cmd, unsigned int tim)
{
	struct nand_req req;

	zeromem(&req, sizeof(struct nand_req));
	req.nand = rawnand_dev.nand_dev;
	req.type = NAND_REQ_CMD | cmd;
	req.inst_delay = tim;

	return rawnand_dev.ops->exec(&req);
}

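/* Send a single address cycle to the NAND device, applying the given instruction delay. */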
static int nand_send_addr(uint8_t addr, unsigned int tim)
{
	struct nand_req req;

	zeromem(&req, sizeof(struct nand_req));
	req.nand = rawnand_dev.nand_dev;
	req.type = NAND_REQ_ADDR;
	req.addr = &addr;
	req.inst_delay = tim;

	return rawnand_dev.ops->exec(&req);
}

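/* Ask the controller to wait for the device, bounded by 'delay' milliseconds. */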
static int nand_send_wait(unsigned int delay, unsigned int tim)
{
	struct nand_req req;

	zeromem(&req, sizeof(struct nand_req));
	req.nand = rawnand_dev.nand_dev;
	req.type = NAND_REQ_WAIT;
	req.inst_delay = tim;
	req.delay_ms = delay;

	return rawnand_dev.ops->exec(&req);
}

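/* Read 'length' bytes from the NAND device; 'use_8bit' forces 8-bit bus accesses. */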
static int nand_read_data(uint8_t *data, unsigned int length, bool use_8bit)
{
	struct nand_req req;

	zeromem(&req, sizeof(struct nand_req));
	req.nand = rawnand_dev.nand_dev;
	req.type = NAND_REQ_DATAIN | (use_8bit ? NAND_REQ_BUS_WIDTH_8 : 0U);
	req.addr = data;
	req.length = length;

	return rawnand_dev.ops->exec(&req);
}

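/*
 * Change the read column within the page currently loaded in the device,
 * then read 'len' bytes into 'buffer'.
 */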
int nand_change_read_column_cmd(unsigned int offset, uintptr_t buffer,
				unsigned int len)
{
	int ret;
	uint8_t addr[2];
	unsigned int i;

	ret = nand_send_cmd(NAND_CMD_CHANGE_1ST, 0U);
	if (ret != 0) {
		return ret;
	}

	if (rawnand_dev.nand_dev->buswidth == NAND_BUS_WIDTH_16) {
		offset /= 2U;
	}

	addr[0] = offset;
	addr[1] = offset >> 8;

	for (i = 0; i < 2U; i++) {
		ret = nand_send_addr(addr[i], 0U);
		if (ret != 0) {
			return ret;
		}
	}

	ret = nand_send_cmd(NAND_CMD_CHANGE_2ND, NAND_TCCS_MIN);
	if (ret != 0) {
		return ret;
	}

	return nand_read_data((uint8_t *)buffer, len, false);
}

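/*
 * Read a page: send the column and row address cycles, wait for the page to
 * be transferred to the device data register, then read 'len' bytes into
 * 'buffer' (skipped when 'buffer' is 0).
 */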
int nand_read_page_cmd(unsigned int page, unsigned int offset,
		       uintptr_t buffer, unsigned int len)
{
	uint8_t addr[5];
	uint8_t i = 0U;
	uint8_t j;
	int ret;

	VERBOSE(">%s page %u offset %u buffer 0x%lx\n", __func__, page, offset,
		buffer);

	if (rawnand_dev.nand_dev->buswidth == NAND_BUS_WIDTH_16) {
		offset /= 2U;
	}

	addr[i++] = offset;
	addr[i++] = offset >> 8;

	addr[i++] = page;
	addr[i++] = page >> 8;
	if (rawnand_dev.nand_dev->size > SZ_128M) {
		addr[i++] = page >> 16;
	}

	ret = nand_send_cmd(NAND_CMD_READ_1ST, 0U);
	if (ret != 0) {
		return ret;
	}

	for (j = 0U; j < i; j++) {
		ret = nand_send_addr(addr[j], 0U);
		if (ret != 0) {
			return ret;
		}
	}

	ret = nand_send_cmd(NAND_CMD_READ_2ND, NAND_TWB_MAX);
	if (ret != 0) {
		return ret;
	}

	ret = nand_send_wait(PSEC_TO_MSEC(NAND_TR_MAX), NAND_TRR_MIN);
	if (ret != 0) {
		return ret;
	}

	if (buffer != 0U) {
		ret = nand_read_data((uint8_t *)buffer, len, false);
	}

	return ret;
}

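/* Read the status register; the status byte is returned only if requested. */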
static int nand_status(uint8_t *status)
{
	int ret;

	ret = nand_send_cmd(NAND_CMD_STATUS, NAND_TWHR_MIN);
	if (ret != 0) {
		return ret;
	}

	if (status != NULL) {
		ret = nand_read_data(status, 1U, true);
	}

	return ret;
}

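/*
 * Poll the status register until the ready bit is set or 'delay_ms' expires,
 * then switch the device back to read mode.
 */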
int nand_wait_ready(unsigned int delay_ms)
{
	uint8_t status;
	int ret;
	uint64_t timeout;

	/* Wait before reading status */
	udelay(1);

	ret = nand_status(NULL);
	if (ret != 0) {
		return ret;
	}

	timeout = timeout_init_us(delay_ms * 1000U);
	while (!timeout_elapsed(timeout)) {
		ret = nand_read_data(&status, 1U, true);
		if (ret != 0) {
			return ret;
		}

		if ((status & NAND_STATUS_READY) != 0U) {
			return nand_send_cmd(NAND_CMD_READ_1ST, 0U);
		}

		udelay(10);
	}

	return -ETIMEDOUT;
}

#if NAND_ONFI_DETECT
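/* Compute the ONFI parameter page CRC16 (polynomial 0x8005) over 'data_len' bytes. */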
static uint16_t nand_check_crc(uint16_t crc, uint8_t *data_in,
			       unsigned int data_len)
{
	uint32_t i;
	uint32_t j;
	uint32_t bit;

	for (i = 0U; i < data_len; i++) {
		uint8_t cur_param = *data_in++;

		for (j = BIT(7); j != 0U; j >>= 1) {
			bit = crc & BIT(15);
			crc <<= 1;

			if ((cur_param & j) != 0U) {
				bit ^= BIT(15);
			}

			if (bit != 0U) {
				crc ^= CRC_POLYNOM;
			}
		}

		crc &= GENMASK(15, 0);
	}

	return crc;
}

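/* Read 'size' identification bytes at the given ID address. */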
static int nand_read_id(uint8_t addr, uint8_t *id, unsigned int size)
{
	int ret;

	ret = nand_send_cmd(NAND_CMD_READID, 0U);
	if (ret != 0) {
		return ret;
	}

	ret = nand_send_addr(addr, NAND_TWHR_MIN);
	if (ret != 0) {
		return ret;
	}

	return nand_read_data(id, size, true);
}

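/* Reset the NAND device and wait for it to become ready again. */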
static int nand_reset(void)
{
	int ret;

	ret = nand_send_cmd(NAND_CMD_RESET, NAND_TWB_MAX);
	if (ret != 0) {
		return ret;
	}

	return nand_send_wait(PSEC_TO_MSEC(NAND_TRST_MAX), 0U);
}

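/*
 * Read and check the ONFI parameter page (signature and CRC), then derive
 * the bus width, page size, block size, device size and ECC requirement.
 */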
static int nand_read_param_page(void)
{
	struct nand_param_page page;
	uint8_t addr = 0U;
	int ret;

	ret = nand_send_cmd(NAND_CMD_READ_PARAM_PAGE, 0U);
	if (ret != 0) {
		return ret;
	}

	ret = nand_send_addr(addr, NAND_TWB_MAX);
	if (ret != 0) {
		return ret;
	}

	ret = nand_send_wait(PSEC_TO_MSEC(NAND_TR_MAX), NAND_TRR_MIN);
	if (ret != 0) {
		return ret;
	}

	ret = nand_read_data((uint8_t *)&page, sizeof(page), true);
	if (ret != 0) {
		return ret;
	}

	if (strncmp((char *)&page.page_sig, "ONFI", 4) != 0) {
		WARN("ONFI signature not found in parameter page\n");
		return -EINVAL;
	}

	if (nand_check_crc(CRC_INIT_VALUE, (uint8_t *)&page, 254U) !=
	    page.crc16) {
		WARN("ONFI parameter page CRC error\n");
		return -EINVAL;
	}

	if ((page.features & ONFI_FEAT_BUS_WIDTH_16) != 0U) {
		rawnand_dev.nand_dev->buswidth = NAND_BUS_WIDTH_16;
	} else {
		rawnand_dev.nand_dev->buswidth = NAND_BUS_WIDTH_8;
	}

	rawnand_dev.nand_dev->block_size = page.num_pages_per_blk *
					   page.bytes_per_page;
	rawnand_dev.nand_dev->page_size = page.bytes_per_page;
	rawnand_dev.nand_dev->size = page.num_pages_per_blk *
				     page.bytes_per_page *
				     page.num_blk_in_lun * page.num_lun;

	if (page.nb_ecc_bits != GENMASK_32(7, 0)) {
		rawnand_dev.nand_dev->ecc.max_bit_corr = page.nb_ecc_bits;
		rawnand_dev.nand_dev->ecc.size = SZ_512;
	}

	VERBOSE("Page size %u, block_size %u, Size %llu, ecc %u, buswidth %u\n",
		rawnand_dev.nand_dev->page_size,
		rawnand_dev.nand_dev->block_size, rawnand_dev.nand_dev->size,
		rawnand_dev.nand_dev->ecc.max_bit_corr,
		rawnand_dev.nand_dev->buswidth);

	return 0;
}

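/* Reset the device and probe for the ONFI signature and parameter page. */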
static int detect_onfi(void)
{
	int ret;
	char id[4];

	ret = nand_reset();
	if (ret != 0) {
		return ret;
	}

	ret = nand_read_id(ONFI_SIGNATURE_ADDR, (uint8_t *)id, sizeof(id));
	if (ret != 0) {
		return ret;
	}

	if (strncmp(id, "ONFI", sizeof(id)) != 0) {
		WARN("Non-ONFI NAND detected\n");
		return -ENODEV;
	}

	return nand_read_param_page();
}
#endif

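/*
 * A block is bad if the bad block marker of one of its first two pages
 * (first spare bytes) is not 0xFF.
 */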
static int nand_mtd_block_is_bad(unsigned int block)
{
	unsigned int nbpages_per_block = rawnand_dev.nand_dev->block_size /
					 rawnand_dev.nand_dev->page_size;
	uint8_t bbm_marker[2];
	uint8_t page;
	int ret;

	for (page = 0U; page < 2U; page++) {
		ret = nand_read_page_cmd((block * nbpages_per_block) + page,
					 rawnand_dev.nand_dev->page_size,
					 (uintptr_t)bbm_marker,
					 sizeof(bbm_marker));
		if (ret != 0) {
			return ret;
		}

		if ((bbm_marker[0] != GENMASK_32(7, 0)) ||
		    (bbm_marker[1] != GENMASK_32(7, 0))) {
			WARN("Block %u is bad\n", block);
			return 1;
		}
	}

	return 0;
}

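/* Read a full page without any ECC correction. */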
static int nand_mtd_read_page_raw(struct nand_device *nand, unsigned int page,
				  uintptr_t buffer)
{
	return nand_read_page_cmd(page, 0U, buffer,
				  rawnand_dev.nand_dev->page_size);
}

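/* Register the NAND controller operations used to execute requests. */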
void nand_raw_ctrl_init(const struct nand_ctrl_ops *ops)
{
	rawnand_dev.ops = ops;
}

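/*
 * Probe the raw NAND device: detect ONFI parameters when enabled, let the
 * platform override the description, then report the device and erase sizes.
 */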
int nand_raw_init(unsigned long long *size, unsigned int *erase_size)
{
	rawnand_dev.nand_dev = get_nand_device();
	if (rawnand_dev.nand_dev == NULL) {
		return -EINVAL;
	}

	rawnand_dev.nand_dev->mtd_block_is_bad = nand_mtd_block_is_bad;
	rawnand_dev.nand_dev->mtd_read_page = nand_mtd_read_page_raw;
	rawnand_dev.nand_dev->ecc.mode = NAND_ECC_NONE;

	if ((rawnand_dev.ops->setup == NULL) ||
	    (rawnand_dev.ops->exec == NULL)) {
		return -ENODEV;
	}

#if NAND_ONFI_DETECT
	if (detect_onfi() != 0) {
		WARN("ONFI detection failed\n");
	}
#endif

	if (plat_get_raw_nand_data(&rawnand_dev) != 0) {
		return -EINVAL;
	}

	assert((rawnand_dev.nand_dev->page_size != 0U) &&
	       (rawnand_dev.nand_dev->block_size != 0U) &&
	       (rawnand_dev.nand_dev->size != 0U));

	*size = rawnand_dev.nand_dev->size;
	*erase_size = rawnand_dev.nand_dev->block_size;

	rawnand_dev.ops->setup(rawnand_dev.nand_dev);

	return 0;
}