int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst)
{
	unsigned int block, lastblock;
	unsigned int page, page_offset;

	/*
	 * offs is normally page aligned; if it is not, the extra leading
	 * bytes are shifted out by the memmove handling below.
	 */
	block = offs / CONFIG_SYS_NAND_BLOCK_SIZE;
	lastblock = (offs + size - 1) / CONFIG_SYS_NAND_BLOCK_SIZE;
	page = (offs % CONFIG_SYS_NAND_BLOCK_SIZE) / CONFIG_SYS_NAND_PAGE_SIZE;
	page_offset = offs % CONFIG_SYS_NAND_PAGE_SIZE;

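	/*
	 * Walk the NAND block by block. Whenever a bad block is found,
	 * lastblock is pushed out by one so the requested amount of data
	 * is still read.
	 */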
	while (block <= lastblock) {
		if (!nand_is_bad_block(block)) {
			while (page < CONFIG_SYS_NAND_PAGE_COUNT) {
				nand_read_page(block, page, dst);
				/*
				 * When offs is not aligned to a page address,
				 * the extra leading bytes are read into dst as
				 * well. Shift the data so that the first byte
				 * of the image ends up at the original dst.
				 */
				if (unlikely(page_offset)) {
					memmove(dst, dst + page_offset,
						CONFIG_SYS_NAND_PAGE_SIZE);
					dst = (void *)((int)dst - page_offset);
					page_offset = 0;
				}
				dst += CONFIG_SYS_NAND_PAGE_SIZE;
				page++;
			}

			page = 0;
		} else {
			/* Skip the bad block by extending the range by one */
			lastblock++;
		}

		block++;
	}

	return 0;
}
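
/*
 * Illustrative use only (the call site below is an assumption, not part of
 * this file): an SPL board file would typically load the next stage with
 *
 *	nand_spl_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS,
 *			    CONFIG_SYS_NAND_U_BOOT_SIZE,
 *			    (void *)CONFIG_SYS_NAND_U_BOOT_DST);
 *
 * before jumping to the loaded image.
 */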

/**
 * nand_spl_adjust_offset - Adjust offset from a starting sector
 * @sector: Address of the sector
 * @offs: Offset starting from @sector
 *
 * If one or more bad blocks are in the address space between @sector
 * and @sector + @offs, @offs is increased by the NAND block size for
 * each bad block found.
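 *
 * Worked example (illustrative values, not taken from any board config):
 * with a 128 KiB block size, @sector = 0 and @offs = 256 KiB, a single
 * bad block at block 1 makes this return 384 KiB, so the caller skips
 * past the bad block and still reaches the same logical data.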
 */
u32 nand_spl_adjust_offset(u32 sector, u32 offs)
{
	unsigned int block, lastblock;

	block = sector / CONFIG_SYS_NAND_BLOCK_SIZE;
	lastblock = (sector + offs) / CONFIG_SYS_NAND_BLOCK_SIZE;

	while (block <= lastblock) {
		if (nand_is_bad_block(block)) {
			offs += CONFIG_SYS_NAND_BLOCK_SIZE;
			lastblock++;
		}

		block++;
	}

	return offs;
}
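
/*
 * Illustrative use only (spl_payload_offs and the call sequence below are
 * assumptions, not part of this file): a caller that reads relative to a
 * fixed payload base can account for bad blocks first, e.g.
 *
 *	offs = nand_spl_adjust_offset(spl_payload_offs, offs);
 *	nand_spl_load_image(spl_payload_offs + offs, size, dst);
 */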

#ifdef CONFIG_SPL_UBI
/*
 * Temporary storage for non NAND page aligned and non NAND page sized
 * reads. Note: This does not support runtime detected FLASH yet, but
 * that should be reasonably easy to fix by making the buffer large
 * enough :)
 */
static u8 scratch_buf[CONFIG_SYS_NAND_PAGE_SIZE];

/**
 * nand_spl_read_block - Read data from physical eraseblock into a buffer
 * @block: Number of the physical eraseblock
 * @offset: Data offset from the start of @block
 * @len: Data size to read
 * @dst: Address of the destination buffer
 *
 * This could be further optimized if we'd have a subpage read
 * function in the simple code. On NAND which allows subpage reads
 * this would spare quite some time to read out e.g. the VID header of
 * UBI.
 *
 * Notes:
 * @offset + @len must not be larger than a physical
 * erase block. No sanity check is done, for simplicity reasons.
 *
 * To support runtime detected flash this needs to be extended with
 * information about the actual flash geometry, but that's beyond the
 * scope of this effort and for most applications where fast boot is
 * required it is not an issue anyway.
 */
int nand_spl_read_block(int block, int offset, int len, void *dst)
{
	int page, read;

	/* Calculate the page number */
	page = offset / CONFIG_SYS_NAND_PAGE_SIZE;

	/* Offset to the start of a flash page */
	offset = offset % CONFIG_SYS_NAND_PAGE_SIZE;

	while (len) {
		/*
		 * Non page aligned reads go to the scratch buffer.
		 * Page aligned reads go directly to the destination.
		 */
		if (offset || len < CONFIG_SYS_NAND_PAGE_SIZE) {
			nand_read_page(block, page, scratch_buf);
			read = min(len, CONFIG_SYS_NAND_PAGE_SIZE - offset);
			memcpy(dst, scratch_buf + offset, read);
			offset = 0;
		} else {
			nand_read_page(block, page, dst);
			read = CONFIG_SYS_NAND_PAGE_SIZE;
		}
		page++;
		len -= read;
		dst += read;
	}
	return 0;
}
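
/*
 * Worked example (values are illustrative assumptions): with a 2048 byte
 * NAND page, nand_spl_read_block(4, 2048, 64, buf) starts at page 1 of
 * eraseblock 4 with a zero in-page offset. Because only 64 bytes are
 * requested, the page is read into scratch_buf first and the 64 bytes are
 * then copied to buf; a full-page, page-aligned request would instead be
 * read straight into buf.
 */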
#endif