// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#define PROTOTYPES
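
/*
 * PROTOTYPES tells bget.c (included further down) to declare its functions
 * with ANSI C prototypes instead of K&R-style declarations.
 */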

/*
 * BGET CONFIGURATION
 * ==================
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_ALL_OPTIONS
#define TestProg    20000	/* Generate built-in test program
				   if defined.  The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif


#ifdef __LP64__
#define SizeQuant   16
#endif
#ifdef __ILP32__
#define SizeQuant   8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size.  This
				   MUST be a power of two. */
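
/*
 * For example, with SizeQuant 16 (LP64) a one-byte allocation still
 * consumes a full 16-byte quantum of payload space in addition to the
 * bget block header.
 */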

#ifdef BGET_ENABLE_ALL_OPTIONS
#define BufDump     1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid    1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData    1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats    1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe    1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit     1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request.  This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl       1		/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control. */
#endif

#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData    1
#define BufValid    1
#define FreeWipe    1
#endif

#ifdef CFG_WITH_STATS
#define BufStats    1
#endif

#include <compiler.h>
#include <malloc.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdlib_ext.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/thread.h>
#include <kernel/spinlock.h>
#include <kernel/unwind.h>

static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}

static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return asan_memcpy_unchecked(dst, src, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}

static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return memcpy(dst, src, n);
}

#endif /*__KERNEL__*/

#include "bget.c"		/* this is ugly, but this is bget */
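
/*
 * struct malloc_pool - one contiguous region handed to this allocator via
 * malloc_add_pool() and friends, recorded for validation and iteration.
 */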
struct malloc_pool {
	void *buf;
	size_t len;
};
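
/*
 * struct malloc_ctx - all state of one heap instance: the bget pool set,
 * the array of registered pools, optional statistics and, in TEE Core, a
 * spinlock serializing access from multiple threads.
 */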

struct malloc_ctx {
	struct bpoolset poolset;
	struct malloc_pool *pool;
	size_t pool_len;
#ifdef BufStats
	struct malloc_stats mstats;
#endif
#ifdef __KERNEL__
	unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}

#endif /* __KERNEL__ */

#define DEFINE_CTX(name) struct malloc_ctx name = \
	{ .poolset = { .freelist = { {0, 0}, \
				     {&name.poolset.freelist, \
				      &name.poolset.freelist}}}}
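
/*
 * The initializer above sets up an empty bget pool set: the free list's
 * header block has zero sizes and its forward/backward links point back at
 * the list head itself, i.e. an empty circular doubly linked list.  This
 * mirrors what raw_malloc_init_ctx() does at runtime.
 */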

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_VIRTUALIZATION
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif
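
/*
 * With CFG_VIRTUALIZATION, nex_malloc_ctx backs the "nexus" heap placed in
 * memory common to all guest partitions (hence __nex_data), while
 * malloc_ctx serves the per-partition heap.
 */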
197
print_oom(size_t req_size __maybe_unused,void * ctx __maybe_unused)198 static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
199 {
200 #if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
201 EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
202 print_kernel_stack();
203 #endif
204 }

#ifdef BufStats

static void raw_malloc_return_hook(void *p, size_t requested_size,
				   struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		print_oom(requested_size, ctx);
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct malloc_stats *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
	malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}

#else /* BufStats */

static void raw_malloc_return_hook(void *p, size_t requested_size,
				   struct malloc_ctx *ctx)
{
	if (!p)
		print_oom(requested_size, ctx);
}

#endif /* BufStats */

#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif
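
/*
 * Iterator state for walking every buffer (free and allocated) in every
 * registered pool: next_buf points at the bget block header to visit next,
 * pool_idx selects the pool in ctx->pool[].
 */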

struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
			       size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
			  struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= ctx->pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp) \
		for (bpool_foreach_iterator_init((ctx), (iterator)); \
		     bpool_foreach((ctx), (iterator), (bp));)
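
/*
 * Usage sketch: count the allocated buffers in a context.
 *
 *	struct bpool_iterator itr;
 *	void *b = NULL;
 *	size_t count = 0;
 *
 *	BPOOL_FOREACH(ctx, &itr, &b)
 *		count++;
 */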

void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		   size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	if (!alignment || !IS_POWER_OF_TWO(alignment))
		return NULL;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding the header */
	if (ADD_OVERFLOW(pl_size, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bget(alignment, hdr_size, s, &ctx->poolset);
out:
	raw_malloc_return_hook(ptr, pl_size, ctx);

	return ptr;
}

void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
		 struct malloc_ctx *ctx)
{
	/*
	 * Note that we're feeding SizeQ as alignment, this is the smallest
	 * alignment that bget() can use.
	 */
	return raw_memalign(hdr_size, ftr_size, SizeQ, pl_size, ctx);
}

void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(ptr, &ctx->poolset, wipe);
}

void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		 size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding hdr_size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(0, hdr_size, s, &ctx->poolset);
out:
	raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx);

	return ptr;
}

void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		  size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s;

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	raw_malloc_validate_pools(ctx);

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bgetr(ptr, 0, 0, s, &ctx->poolset);
out:
	raw_malloc_return_hook(p, pl_size, ctx);

	return p;
}

/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;		/* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/* Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}

#ifdef ENABLE_MDBG

struct mdbg_hdr {
	const char *fname;
	uint16_t line;
	uint32_t pl_size;
	uint32_t magic;
#if defined(ARM64)
	uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec
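
/*
 * With malloc debugging enabled, each allocation is laid out as:
 *
 *	[struct mdbg_hdr][payload (pl_size bytes)][pad][uint32_t footer]
 *
 * The header records who allocated the buffer and carries a magic word;
 * the footer is a second magic word placed after the payload so both
 * header corruption and payload overruns can be caught in assert_header().
 */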

static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}
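
/*
 * Worked example: for pl_size = 10, ROUNDUP(10, 4) = 12, so ftr_pad = 2 and
 * the footer area is 6 bytes; the footer word then sits 4-byte aligned at
 * payload offset 12, which is what mdbg_get_footer() computes below.
 */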

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return footer;
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
			    int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}

static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	/*
	 * Check struct mdbg_hdr works with BGET_HDR_QUANTUM.
	 */
	COMPILE_TIME_ASSERT((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}

	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		raw_free(hdr, ctx, wipe);
	}
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	gen_mdbg_free(&malloc_ctx, ptr, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t nmemb, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(nmemb * size), nmemb, size,
			 ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx,
				       const char *fname, int lineno,
				       void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	return hdr;
}

static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname,
			      int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(ctx);

	p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size);
	malloc_unlock(ctx, exceptions);
	return p;
}

#define realloc_unlocked(ctx, ptr, size) \
		gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size))
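
/*
 * Internal callers such as gen_malloc_add_pool() go through this macro, so
 * with malloc debugging enabled their allocations are attributed to this
 * file and line rather than to an external caller.
 */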

static void *gen_mdbg_memalign(struct malloc_ctx *ctx, const char *fname,
			       int lineno, size_t alignment, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}

static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d\n",
			     hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}

void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size);
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size);
}

void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		    size_t size)
{
	return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size);
}

void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}

/*
 * Since malloc debug is enabled, malloc() and friends are redirected by
 * macros to mdbg_malloc() etc.
 * We still want to export the standard entry points in case they are
 * referenced by the application, either directly or via external libraries.
 */
#undef malloc
void *malloc(size_t size)
{
	return mdbg_malloc(__FILE__, __LINE__, size);
}

#undef calloc
void *calloc(size_t nmemb, size_t size)
{
	return mdbg_calloc(__FILE__, __LINE__, nmemb, size);
}

#undef realloc
void *realloc(void *ptr, size_t size)
{
	return mdbg_realloc(__FILE__, __LINE__, ptr, size);
}

#else /* ENABLE_MDBG */

void *malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_malloc(0, 0, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	raw_free(ptr, &malloc_ctx, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr,
			      size_t size)
{
	return raw_realloc(ptr, 0, 0, size, ctx);
}

void *realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = realloc_unlocked(&malloc_ctx, ptr, size);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

void *memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}

#endif /* ENABLE_MDBG */

void free(void *ptr)
{
	free_helper(ptr, false);
}

void free_wipe(void *ptr)
{
	free_helper(ptr, true);
}
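
/*
 * Register [buf, buf + len) as an additional heap pool: the range is
 * trimmed to SizeQuant alignment, handed to bget via bpool() and recorded
 * in ctx->pool[] so the validation and iteration helpers can find it.
 */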

static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead);

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);

	if (start > end || (end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	/* First pool requires a bigger size */
	if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) {
		DMSG("Skipping too small initial pool");
		return;
	}

	exceptions = malloc_lock(ctx);

	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &ctx->poolset);
	l = ctx->pool_len + 1;
	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
	assert(p);
	ctx->pool = p;
	ctx->pool[ctx->pool_len].buf = (void *)start;
	ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
	ctx->pool_len = l;
	malloc_unlock(ctx, exceptions);
}
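
/*
 * Returns true only when [buf, buf + len) lies entirely inside a single
 * currently allocated buffer; free blocks and ranges spanning buffer
 * boundaries are rejected.
 */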

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
						void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(ctx, &itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);

	return ret;
}
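
/*
 * Two half-open ranges [a, b) and [c, d) overlap iff a < d && b > c, which
 * is the test applied against each registered pool below.
 */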

static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
					    void *buf, size_t len)
{
	uintptr_t buf_start = (uintptr_t)buf;
	uintptr_t buf_end = buf_start + len;
	size_t n;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	for (n = 0; n < ctx->pool_len; n++) {
		uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf;
		uintptr_t pool_end = pool_start + ctx->pool[n].len;

		if (buf_start > buf_end || pool_start > pool_end) {
			ret = true;	/* Wrapping buffers, shouldn't happen */
			goto out;
		}

		if (buf_end > pool_start && buf_start < pool_end) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);
	return ret;
}

size_t raw_malloc_get_ctx_size(void)
{
	return sizeof(struct malloc_ctx);
}

void raw_malloc_init_ctx(struct malloc_ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist;
	ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist;
}

void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	gen_malloc_add_pool(ctx, buf, len);
}

#ifdef CFG_WITH_STATS
void raw_malloc_get_stats(struct malloc_ctx *ctx, struct malloc_stats *stats)
{
	gen_malloc_get_stats(ctx, stats);
}
#endif

void malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&malloc_ctx, buf, len);
}

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}

#ifdef CFG_VIRTUALIZATION

#ifndef ENABLE_MDBG

void *nex_malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_malloc(0, 0, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = realloc_unlocked(&nex_malloc_ctx, ptr, size);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	raw_free(ptr, &nex_malloc_ctx, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#else /* ENABLE_MDBG */

void *nex_mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size);
}

void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size);
}

void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size);
}

void *nex_mdbg_memalign(const char *fname, int lineno, size_t alignment,
			size_t size)
{
	return gen_mdbg_memalign(&nex_malloc_ctx, fname, lineno, alignment,
				 size);
}

void nex_mdbg_check(int bufdump)
{
	gen_mdbg_check(&nex_malloc_ctx, bufdump);
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#endif /* ENABLE_MDBG */

void nex_malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}

#ifdef BufStats

void nex_malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&nex_malloc_ctx);
}

void nex_malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&nex_malloc_ctx, stats);
}

#endif

#endif /* CFG_VIRTUALIZATION */