1 /*
2 * pmm.c - POST(Power On Self Test) Memory Manager
3 * according to the specification described in
4 * http://www.phoenix.com/NR/rdonlyres/873A00CF-33AC-4775-B77E-08E7B9754993/0/specspmm101.pdf
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
18 *
19 * Copyright (C) 2009 FUJITSU LIMITED
20 *
21 * Author: Kouya Shimura <kouya@jp.fujitsu.com>
22 */
23
24 /*
25 * Algorithm:
26 *
27 * This is not a fast storage allocator but simple one. There is no
28 * segregated management by block size and it does nothing special for
29 * avoiding the fragmentation.
30 *
31 * The allocation algorithm is a first-fit. All memory blocks are
32 * managed by linear single linked list in order of the address.
33 * (i.e. There is no backward pointer) It searches the first available
34 * equal or larger block from the head (lowest address) of memory
 * heap. The larger block is split into two blocks unless one side
36 * becomes too small.
37 *
38 * For de-allocation, the specified block is just marked as available
39 * and it does nothing else. Thus, the fragmentation will occur. The
 * collection of contiguous available blocks is done on the search
41 * phase of another block allocation.
42 *
43 * The following is an abstract of this algorithm. The actual code
44 * looks complicated on account of alignment and checking the handle.
45 *
46 * static memblk_t *
47 * alloc(heap_t *heap, uint32_t size)
48 * {
49 * static memblk_t *mb;
50 * for_each_memblk(heap, mb) // search memory blocks
51 * if (memblk_is_avail(mb))
52 * {
53 * collect_avail_memblks(heap, mb);
54 * if (size <= memblk_bufsize(mb))
55 * {
56 * split_memblk(mb, size);
57 * set_inuse(mb);
58 * return mb;
59 * }
60 * }
61 * return NULL;
62 * }
63 */
64
65 #include <stdint.h>
66 #include <stddef.h>
67 #include "config.h"
68 #include "e820.h"
69 #include "util.h"
70
71 #define DEBUG_PMM 0
72
73 #define __stringify(a) #a
74 #define stringify(a) __stringify(a)
75
76 #define ASSERT(_expr, _action) \
77 if (!(_expr)) { \
78 printf("ASSERTION FAIL: %s %s:%d %s()\n", \
79 stringify(_expr), __FILE__, __LINE__, __func__); \
80 _action; \
81 } else
82
83 #if DEBUG_PMM
84 # define PMM_DEBUG(format, p...) printf("PMM " format, ##p)
85 #else
86 # define PMM_DEBUG(format, p...)
87 #endif
88
89 struct pmmAllocArgs {
90 uint16_t function;
91 uint32_t length;
92 uint32_t handle;
93 uint16_t flags;
94 } __attribute__ ((packed));
95
96 struct pmmFindArgs {
97 uint16_t function;
98 uint32_t handle;
99 } __attribute__ ((packed));
100
101 struct pmmDeallocateArgs {
102 uint16_t function;
103 uint32_t buffer;
104 } __attribute__ ((packed));
105
106 #define PMM_FUNCTION_ALLOCATE 0
107 #define PMM_FUNCTION_FIND 1
108 #define PMM_FUNCTION_DEALLOC 2
109
110 #define PARAGRAPH_LENGTH 16 // unit of length
111
112 #define PMM_HANDLE_ANONYMOUS 0xffffffff
113
114 #define PMM_FLAGS_MEMORY_TYPE_MASK 0x0003
115 #define PMM_FLAGS_MEMORY_INVALID 0
116 #define PMM_FLAGS_MEMORY_CONVENTIONAL 1 // 0 to 1MB
117 #define PMM_FLAGS_MEMORY_EXTENDED 2 // 1MB to 4GB
118 #define PMM_FLAGS_MEMORY_ANY 3 // whichever is available
119 #define PMM_FLAGS_ALIGINMENT 0x0004
120
121 /* Error code */
122 #define PMM_ENOMEM (0) // Out of memory, duplicate handle
123 #define PMM_EINVAL (-1) // Invalid argument
124
125 #define ALIGN_UP(addr, size) (((addr)+((size)-1))&(~((size)-1)))
126 #define ALIGN_DOWN(addr, size) ((addr)&(~((size)-1)))
127
128 typedef struct memblk {
129 uint32_t magic; // inuse or available
130 struct memblk *next; // points the very next of this memblk
131 uint32_t handle; // identifier of this block
132 uint32_t __fill; // for 16byte alignment, not used
133 uint8_t buffer[0];
134 } memblk_t;
135
136 typedef struct heap {
137 memblk_t *head; // start address of heap
138 memblk_t *end; // end address of heap
139 } heap_t;
140
141 #define HEAP_NOT_INITIALIZED (memblk_t *)-1
142 #define HEAP_ALIGNMENT 16
143
144 /*
145 * PMM handles two memory heaps, the caller chooses either.
146 *
 * - conventional memory (below 1MB)
148 * In HVM, the area is fixed. 0x00010000-0x0007FFFF
149 * (LOWHEAP_SIZE bytes from LOWHEAP_PHYSICAL_ADDRESS)
150 *
151 * - extended memory (start at 1MB, below 4GB)
152 * In HVM, the area starts at memory address 0x00100000.
153 * The end address is variable. We read low RAM address from e820 table.
154 *
155 * The following struct must be located in the data segment since bss
 * in 32bitbios is not relocated.
157 */
158 static struct {
159 heap_t heap; // conventional memory
160 heap_t ext_heap; // extended memory
161 } pmm_data = { {HEAP_NOT_INITIALIZED, NULL}, {NULL, NULL} };
162
163 /* These values are private use, not a spec in PMM */
164 #define MEMBLK_MAGIC_INUSE 0x2A4D4D50 // 'PMM*'
165 #define MEMBLK_MAGIC_AVAIL 0x5F4D4D50 // 'PMM_'
166
167 #define memblk_is_inuse(_mb) ((_mb)->magic == MEMBLK_MAGIC_INUSE)
168 #define memblk_is_avail(_mb) ((_mb)->magic == MEMBLK_MAGIC_AVAIL)
169
/* Mark a block as allocated and tag it with the caller's handle. */
static void set_inuse(memblk_t *mb, uint32_t handle)
{
    mb->handle = handle;
    mb->magic = MEMBLK_MAGIC_INUSE;
}
175
/* Return a block to the free pool; free blocks carry no handle. */
static void set_avail(memblk_t *mb)
{
    mb->handle = PMM_HANDLE_ANONYMOUS;
    mb->magic = MEMBLK_MAGIC_AVAIL;
}
181
182 #define MEMBLK_HEADER_SIZE ((int)(&((memblk_t *)0)->buffer))
183 #define MIN_MEMBLK_SIZE (MEMBLK_HEADER_SIZE + PARAGRAPH_LENGTH)
184
185 #define memblk_size(_mb) ((void *)((_mb)->next) - (void *)(_mb))
186 #define memblk_buffer(_mb) ((uint32_t)(&(_mb)->buffer))
187 #define memblk_bufsize(_mb) (memblk_size(_mb) - MEMBLK_HEADER_SIZE)
188
189 #define buffer_memblk(_buf) (memblk_t *)((_buf) - MEMBLK_HEADER_SIZE)
190
191 #define memblk_loop_mbondition(_h, _mb) \
192 (((_mb) < (_h)->end) && (/* avoid infinite loop */ (_mb) < (_mb)->next))
193
194 #define for_each_memblk(_h, _mb) \
195 for ((_mb) = (_h)->head; \
196 memblk_loop_mbondition(_h, _mb); \
197 (_mb) = (_mb)->next)
198
199 #define for_remain_memblk(_h, _mb) \
200 for (; \
201 memblk_loop_mbondition(_h, _mb); \
202 (_mb) = (_mb)->next)
203
204 /*
205 * <-size->
206 * +==================+======+ +========+========+======+
207 * | avail | | | avail | avail | |
208 * | memblk |memblk|... | memblk | memblk |memblk|...
209 * +==================+======+ => +========+========+======+
210 * ^ | ^ | ^ | ^ | ^ | ^
211 * | |next | |next| |next | |next | |next|
212 * | \________________/ \____/ \______/ \______/ \____/
213 * | ^
214 * | |
215 * mb +- sb(return value)
216 */
217 static memblk_t *
split_memblk(memblk_t * mb,uint32_t size)218 split_memblk(memblk_t *mb, uint32_t size)
219 {
220 memblk_t *sb = (void *)memblk_buffer(mb) + size;
221
222 /* Only split if the remaining fragment is big enough. */
223 if ( (memblk_bufsize(mb) - size) < MIN_MEMBLK_SIZE)
224 return mb;
225
226 sb->next = mb->next;
227 set_avail(sb);
228
229 mb->next = sb;
230 return sb;
231 }
232
233 /*
234 * +======+======+======+======+ +=================+======+
235 * |avail |avail |avail |inuse | | avail |inuse |
236 * |memblk|memblk|memblk|memblk|... | memblk |memblk|...
237 * +======+======+======+======+ => +=================+======+
238 * ^ | ^ | ^ | ^ | ^ | ^ | ^
239 * | |next| |next| |next| |next| |next | |next|
240 * | \____/ \____/ \____/ \____/ \_______________/ \____/
241 * |
242 * mb
243 */
244 static void
collect_avail_memblks(heap_t * heap,memblk_t * mb)245 collect_avail_memblks(heap_t *heap, memblk_t *mb)
246 {
247 memblk_t *nb = mb->next;
248
249 for_remain_memblk ( heap, nb )
250 if ( memblk_is_inuse(nb) )
251 break;
252 mb->next = nb;
253 }
254
255 static void
pmm_init_heap(heap_t * heap,uint32_t from_addr,uint32_t to_addr)256 pmm_init_heap(heap_t *heap, uint32_t from_addr, uint32_t to_addr)
257 {
258 memblk_t *mb = (memblk_t *)ALIGN_UP(from_addr, HEAP_ALIGNMENT);
259
260 mb->next = (memblk_t *)ALIGN_DOWN(to_addr, HEAP_ALIGNMENT);
261 set_avail(mb);
262
263 heap->head = mb;
264 heap->end = mb->next;
265 }
266
267 static void
pmm_initalize(void)268 pmm_initalize(void)
269 {
270 int i, e820_nr = *E820_NR;
271 struct e820entry *e820 = E820;
272
273 /* Extended memory: RAM below 4GB, 0x100000-0xXXXXXXXX */
274 for ( i = 0; i < e820_nr; i++ )
275 {
276 if ( (e820[i].type == E820_RAM) && (e820[i].addr >= 0x00100000) )
277 {
278 pmm_init_heap(&pmm_data.ext_heap, e820[i].addr,
279 e820[i].addr + e820[i].size);
280 break;
281 }
282 }
283
284 /* convectional memory: RAM below 1MB, 0x10000-0x7FFFF */
285 pmm_init_heap(&pmm_data.heap,
286 LOWHEAP_PHYSICAL_ADDRESS,
287 LOWHEAP_PHYSICAL_ADDRESS+LOWHEAP_SIZE);
288 }
289
290 static uint32_t
pmm_max_avail_length(heap_t * heap)291 pmm_max_avail_length(heap_t *heap)
292 {
293 memblk_t *mb;
294 uint32_t size, max = 0;
295
296 for_each_memblk ( heap, mb )
297 {
298 if ( !memblk_is_avail(mb) )
299 continue;
300 collect_avail_memblks(heap, mb);
301 size = memblk_bufsize(mb);
302 if ( size > max )
303 max = size;
304 }
305
306 return (max / PARAGRAPH_LENGTH);
307 }
308
/*
 * First-fit search: return the first available block whose buffer can
 * hold 'size' bytes, coalescing runs of free blocks along the way.
 * Returns NULL when nothing fits, or when 'handle' is already used by
 * an in-use block encountered during the scan.
 */
static memblk_t *
first_fit(heap_t *heap, uint32_t size, uint32_t handle, uint32_t flags)
{
    memblk_t *mb;
    int32_t align = 0;

    /*
     * Required alignment is the largest power of two dividing 'size':
     * (size ^ (size - 1)) sets the lowest set bit of 'size' and all
     * bits below it; ">> 1" then "+ 1" leaves exactly that lowest bit.
     */
    if ( flags & PMM_FLAGS_ALIGINMENT )
        align = ((size ^ (size - 1)) >> 1) + 1;

    for_each_memblk ( heap, mb )
    {
        if ( memblk_is_avail(mb) )
        {
            /* Merge any following free blocks into this one first. */
            collect_avail_memblks(heap, mb);

            if ( align )
            {
                uint32_t addr = memblk_buffer(mb);
                uint32_t offset = ALIGN_UP(addr, align) - addr;

                if ( offset > 0 )
                {
                    /*
                     * Buffers and alignments are both multiples of 16,
                     * so any non-zero gap is at least one header's
                     * worth of space.
                     */
                    ASSERT(offset >= MEMBLK_HEADER_SIZE, continue);

                    if ( (offset + size) > memblk_bufsize(mb) )
                        continue;

                    /*
                     * Split so the second block's buffer lands exactly
                     * on the aligned address; the leading remainder
                     * stays available.
                     */
                    mb = split_memblk(mb, offset - MEMBLK_HEADER_SIZE);
                    return mb;
                }
            }

            if ( size <= memblk_bufsize(mb) )
                return mb;
        }
        else
        {
            ASSERT(memblk_is_inuse(mb), return NULL);

            /* Duplication check for handle. */
            if ( (handle != PMM_HANDLE_ANONYMOUS) && (mb->handle == handle) )
                return NULL;
        }
    }

    return NULL;
}
356
357 static memblk_t *
pmm_find_handle(heap_t * heap,uint32_t handle)358 pmm_find_handle(heap_t *heap, uint32_t handle)
359 {
360 memblk_t *mb;
361
362 if ( handle == PMM_HANDLE_ANONYMOUS )
363 return NULL;
364
365 for_each_memblk ( heap, mb )
366 if ( mb->handle == handle )
367 return mb;
368
369 return NULL;
370 }
371
372 /*
373 * allocate a memory block of the specified type and size, and returns
374 * the address of the memory block.
375 *
376 * A client-specified identifier to be associated with the allocated
377 * memory block. A handle of 0xFFFFFFFF indicates that no identifier
378 * should be associated with the block. Such a memory block is known
379 * as an "anonymous" memory block and cannot be found using the
380 * pmmFind function. If a specified handle for a requested memory
381 * block is already used in a currently allocated memory block, the
382 * error value of 0x00000000 is returned
383 *
384 * If length is 0x00000000, no memory is allocated and the value
385 * returned is the size of the largest memory block available for the
386 * memory type specified in the flags parameter. The alignment bit in
387 * the flags register is ignored when calculating the largest memory
388 * block available.
389 *
390 * If a specified handle for a requested memory block is already used
391 * in a currently allocated memory block, the error value of
392 * 0x00000000 is returned.
393 *
394 * A return value of 0x00000000 indicates that an error occurred and
395 * no memory has been allocated.
396 */
397 static uint32_t
pmmAllocate(uint32_t length,uint32_t handle,uint16_t flags)398 pmmAllocate(uint32_t length, uint32_t handle, uint16_t flags)
399 {
400 heap_t *heap;
401 memblk_t *mb;
402 uint32_t size;
403
404 switch ( flags & PMM_FLAGS_MEMORY_TYPE_MASK )
405 {
406 case PMM_FLAGS_MEMORY_CONVENTIONAL:
407 heap = &pmm_data.heap;
408 break;
409
410 case PMM_FLAGS_MEMORY_EXTENDED:
411 case PMM_FLAGS_MEMORY_ANY: /* XXX: ignore conventional memory for now */
412 heap = &pmm_data.ext_heap;
413 break;
414
415 default:
416 return PMM_EINVAL;
417 }
418
419 /* return the largest memory block available */
420 if ( length == 0 )
421 return pmm_max_avail_length(heap);
422
423 size = length * PARAGRAPH_LENGTH;
424 mb = first_fit(heap, size, handle, flags);
425
426 if ( mb == NULL )
427 return PMM_ENOMEM;
428
429 /* duplication check for handle */
430 if ( handle != PMM_HANDLE_ANONYMOUS )
431 {
432 memblk_t *nb = mb->next;
433
434 for_remain_memblk(heap, nb)
435 if (nb->handle == handle)
436 return PMM_ENOMEM;
437 }
438
439 split_memblk(mb, size);
440 set_inuse(mb, handle);
441
442 return memblk_buffer(mb);
443 }
444
445 /*
446 * returns the address of the memory block associated with the
447 * specified handle.
448 *
449 * A return value of 0x00000000 indicates that the handle does not
450 * correspond to a currently allocated memory block.
451 */
452 static uint32_t
pmmFind(uint32_t handle)453 pmmFind(uint32_t handle)
454 {
455 memblk_t *mb;
456
457 if ( handle == PMM_HANDLE_ANONYMOUS )
458 return 0;
459
460 mb = pmm_find_handle(&pmm_data.heap, handle);
461 if ( mb == NULL )
462 mb = pmm_find_handle(&pmm_data.ext_heap, handle);
463
464 return mb ? memblk_buffer(mb) : 0;
465 }
466
467 /*
468 * frees the specified memory block that was previously allocated by
469 * pmmAllocate.
470 *
471 * If the memory block was deallocated correctly, the return value is
472 * 0x00000000. If there was an error, the return value is non-zero.
473 */
474 static uint32_t
pmmDeallocate(uint32_t buffer)475 pmmDeallocate(uint32_t buffer)
476 {
477 memblk_t *mb = buffer_memblk(buffer);
478
479 if ( !memblk_is_inuse(mb) )
480 return PMM_EINVAL;
481
482 set_avail(mb);
483 return 0;
484 }
485
486
487 union pmm_args {
488 uint16_t function;
489 struct pmmAllocArgs alloc;
490 struct pmmFindArgs find;
491 struct pmmDeallocateArgs dealloc;
492 } __attribute__ ((packed));
493
494 /*
495 * entry function of all PMM services.
496 *
497 * Values returned to the caller are placed in the DX:AX register
498 * pair. The flags and all registers, other than DX and AX, are
499 * preserved across calls to PMM services.
500 */
501 uint32_t
pmm(void * argp)502 pmm(void *argp)
503 {
504 union pmm_args *ap = argp;
505 uint32_t ret = PMM_EINVAL;
506
507 if ( pmm_data.heap.head == HEAP_NOT_INITIALIZED )
508 pmm_initalize();
509
510 switch ( ap->function )
511 {
512 case PMM_FUNCTION_ALLOCATE:
513 ret = pmmAllocate(ap->alloc.length, ap->alloc.handle, ap->alloc.flags);
514 PMM_DEBUG("Alloc length=%x handle=%x flags=%x ret=%x\n",
515 ap->alloc.length, ap->alloc.handle, ap->alloc.flags, ret);
516 break;
517
518 case PMM_FUNCTION_FIND:
519 ret = pmmFind(ap->find.handle);
520 PMM_DEBUG("Find handle=%x ret=%x\n", ap->find.handle, ret);
521 break;
522
523 case PMM_FUNCTION_DEALLOC:
524 ret = pmmDeallocate(ap->dealloc.buffer);
525 PMM_DEBUG("Dealloc buffer=%x ret=%x\n", ap->dealloc.buffer, ret);
526 break;
527
528 default:
529 PMM_DEBUG("Invalid function:%d\n", ap->function);
530 break;
531 }
532
533 return ret;
534 }
535