// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <common.h>
#include <image.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>

#define LMB_ALLOC_ANYWHERE	0

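/* Print all memory and reserved regions, regardless of DEBUG */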
void lmb_dump_all_force(struct lmb *lmb)
{
	unsigned long i;

	printf("lmb_dump_all:\n");
	printf("    memory.cnt		   = 0x%lx\n", lmb->memory.cnt);
	printf("    memory.size		   = 0x%llx\n",
	       (unsigned long long)lmb->memory.size);
	for (i = 0; i < lmb->memory.cnt; i++) {
		printf("    memory.reg[0x%lx].base   = 0x%llx\n", i,
		       (unsigned long long)lmb->memory.region[i].base);
		printf("		   .size   = 0x%llx\n",
		       (unsigned long long)lmb->memory.region[i].size);
	}

	printf("\n    reserved.cnt	   = 0x%lx\n", lmb->reserved.cnt);
	printf("    reserved.size	   = 0x%llx\n",
	       (unsigned long long)lmb->reserved.size);
	for (i = 0; i < lmb->reserved.cnt; i++) {
		printf("    reserved.reg[0x%lx].base = 0x%llx\n", i,
		       (unsigned long long)lmb->reserved.region[i].base);
		printf("		     .size = 0x%llx\n",
		       (unsigned long long)lmb->reserved.region[i].size);
	}
}

void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
	lmb_dump_all_force(lmb);
#endif
}

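/* Return 1 if the two address ranges overlap, 0 otherwise */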
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

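/*
 * Return 1 if range 2 immediately follows range 1, -1 if range 1
 * immediately follows range 2, and 0 if the ranges are not adjacent.
 */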
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	phys_addr_t base1 = rgn->region[r1].base;
	phys_size_t size1 = rgn->region[r1].size;
	phys_addr_t base2 = rgn->region[r2].base;
	phys_size_t size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

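/* Delete entry r and shift the following entries down to close the gap */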
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

void lmb_init(struct lmb *lmb)
{
	lmb->memory.cnt = 0;
	lmb->memory.size = 0;
	lmb->reserved.cnt = 0;
	lmb->reserved.size = 0;
}

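/* Apply the arch, board and (optionally) FDT memory reservations */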
static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
{
	arch_lmb_reserve(lmb);
	board_lmb_reserve(lmb);

	if (IMAGE_ENABLE_OF_LIBFDT && fdt_blob)
		boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct lmb *lmb, struct bd_info *bd, void *fdt_blob)
{
	int i;

	lmb_init(lmb);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (bd->bi_dram[i].size) {
			lmb_add(lmb, bd->bi_dram[i].start,
				bd->bi_dram[i].size);
		}
	}

	lmb_reserve_common(lmb, fdt_blob);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
				phys_size_t size, void *fdt_blob)
{
	lmb_init(lmb);
	lmb_add(lmb, base, size);
	lmb_reserve_common(lmb, fdt_blob);
}

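/*
 * Add a region to the table: coalesce it with an adjacent entry where
 * possible, otherwise insert it in base-address order. Returns a negative
 * value if the region overlaps an existing entry or the table is full.
 */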
/* This routine is called with relocation disabled. */
static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if (rgn->cnt == 0) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->cnt = 1;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			/* regions overlap */
			return -1;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}

	rgn->cnt++;

	return 0;
}

/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->memory);

	return lmb_add_region(_rgn, base, size);
}

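/*
 * Remove [base, base + size) from the reserved list, splitting an entry
 * into two if the freed range lies in its middle.
 */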
long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn = &(lmb->reserved);
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region that (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if the region matches at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end + 1;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region matches at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region(rgn, end + 1, rgnend - end);
}

long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->reserved);

	return lmb_add_region(_rgn, base, size);
}

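/*
 * Return the index of the first region in @rgn that overlaps
 * [base, base + size), or -1 if there is no overlap.
 */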
static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

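/* Allocate @size bytes, aligned to @align, from anywhere in free memory */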
phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
{
	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(lmb, size, align, max_addr);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

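/*
 * Round @addr down to the nearest multiple of @size; @size must be a
 * power of two.
 */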
static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

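/*
 * Walk the memory regions from the top down and return the highest
 * aligned address below @max_addr that does not overlap a reserved
 * region; the range is added to the reserved list before it is
 * returned. Returns 0 on failure.
 */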
phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb->memory.region[i].base;
		phys_size_t lmbsize = lmb->memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb->reserved, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region(&lmb->reserved, base,
						   size) < 0)
					return 0;
				return base;
			}
			res_base = lmb->reserved.region[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	long rgn;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
				      lmb->memory.region[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve(lmb, base, size) >= 0)
				return base;
		}
	}
	return 0;
}

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
{
	int i;
	long rgn;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb->reserved.cnt; i++) {
			if (addr < lmb->reserved.region[i].base) {
				/* first reserved range > requested address */
				return lmb->reserved.region[i].base - addr;
			}
			if (lmb->reserved.region[i].base +
			    lmb->reserved.region[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb->memory.region[lmb->memory.cnt - 1].base +
		       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
	}
	return 0;
}

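/* Return 1 if @addr falls inside any reserved region, 0 otherwise */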
int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
	int i;

	for (i = 0; i < lmb->reserved.cnt; i++) {
		phys_addr_t upper = lmb->reserved.region[i].base +
			lmb->reserved.region[i].size - 1;
		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}

__weak void board_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific arch_lmb_reserve() */
}