/*
 * Two Levels Segregate Fit memory allocator (TLSF)
 * Version 2.3.2
 *
 * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
 *
 * Thanks to Ismael Ripoll for his suggestions and reviews
 *
 * Copyright (C) 2007, 2006, 2005, 2004
 *
 * This code is released using a dual license strategy: GPL/LGPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of the GNU General Public License Version 2.0
 * Released under the terms of the GNU Lesser General Public License
 * Version 2.1
 *
 * This is a kernel port of the TLSF allocator.
 * Original code can be found at: http://rtportal.upv.es/rtmalloc/
 * Adapted for Linux by Nitin Gupta (nitingupta910@gmail.com)
 * (http://code.google.com/p/compcache/source/browse/trunk/sub-projects
 *  /allocators/tlsf-kmod r229 dated Aug 27, 2008)
 * Adapted for Xen by Dan Magenheimer (dan.magenheimer@oracle.com)
 */

#include <xen/irq.h>
#include <xen/mm.h>
#include <xen/pfn.h>
#include <asm/time.h>

#define MAX_POOL_NAME_LEN       16

/* Some IMPORTANT TLSF parameters */
#define MEM_ALIGN       (sizeof(void *) * 2)
#define MEM_ALIGN_MASK  (~(MEM_ALIGN - 1))

#define MAX_FLI         (30)
#define MAX_LOG2_SLI    (5)
#define MAX_SLI         (1 << MAX_LOG2_SLI)

#define FLI_OFFSET      (6)
/* The TLSF structure will only manage blocks bigger than 128 bytes */
#define SMALL_BLOCK     (128)
#define REAL_FLI        (MAX_FLI - FLI_OFFSET)
#define MIN_BLOCK_SIZE  (sizeof(struct free_ptr))
#define BHDR_OVERHEAD   (sizeof(struct bhdr) - MIN_BLOCK_SIZE)

#define PTR_MASK        (sizeof(void *) - 1)
#define BLOCK_SIZE_MASK (0xFFFFFFFF - PTR_MASK)

#define GET_NEXT_BLOCK(addr, r) ((struct bhdr *) \
                                ((char *)(addr) + (r)))
#define ROUNDUP_SIZE(r)         (((r) + MEM_ALIGN - 1) & MEM_ALIGN_MASK)
#define ROUNDDOWN_SIZE(r)       ((r) & MEM_ALIGN_MASK)
#define ROUNDUP_PAGE(r)         (((r) + PAGE_SIZE - 1) & PAGE_MASK)

#define BLOCK_STATE     (0x1)
#define PREV_STATE      (0x2)

/* bit 0 of the block size */
#define FREE_BLOCK      (0x1)
#define USED_BLOCK      (0x0)

/* bit 1 of the block size */
#define PREV_FREE       (0x2)
#define PREV_USED       (0x0)
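/*
 * Illustrative example of the size-field encoding: a free 256-byte block
 * whose physical predecessor is in use stores
 *     size = 256 | FREE_BLOCK | PREV_USED = 0x101
 * and (size & BLOCK_SIZE_MASK) recovers the 256-byte payload size.
 */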

static DEFINE_SPINLOCK(pool_list_lock);
static LIST_HEAD(pool_list_head);

struct free_ptr {
    struct bhdr *prev;
    struct bhdr *next;
};

struct bhdr {
    /* All blocks in a region are linked in order of physical address */
    struct bhdr *prev_hdr;
    /*
     * The size is stored in bytes
     *  bit 0: block is free, if set
     *  bit 1: previous block is free, if set
     */
    u32 size;
    /* Free blocks in individual freelists are linked */
    union {
        struct free_ptr free_ptr;
        u8 buffer[sizeof(struct free_ptr)];
    } ptr;
};
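/*
 * Note: the ptr union overlays the free-list links with the start of the
 * user buffer (ptr.buffer is what xmem_pool_alloc() hands out), so an
 * allocated block only carries BHDR_OVERHEAD bytes of header (prev_hdr
 * and size); the free_ptr links are meaningful for free blocks only.
 */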

struct xmem_pool {
    /* First level bitmap (REAL_FLI bits) */
    u32 fl_bitmap;

    /* Second level bitmap */
    u32 sl_bitmap[REAL_FLI];

    /* Free lists */
    struct bhdr *matrix[REAL_FLI][MAX_SLI];

    spinlock_t lock;

    unsigned long max_size;
    unsigned long grow_size;

    /* Basic stats */
    unsigned long used_size;
    unsigned long num_regions;

    /* User provided functions for expanding/shrinking pool */
    xmem_pool_get_memory *get_mem;
    xmem_pool_put_memory *put_mem;

    struct list_head list;

    char name[MAX_POOL_NAME_LEN];
};

/*
 * Helper functions
 */

/**
 * Returns the indexes (fl, sl) of the list used to serve a request of
 * size r. For requests of at least SMALL_BLOCK bytes, *r is also rounded
 * up to the next size-class boundary, so that any block on the returned
 * list can satisfy it.
 */
static inline void MAPPING_SEARCH(unsigned long *r, int *fl, int *sl)
{
    int t;

    if ( *r < SMALL_BLOCK )
    {
        *fl = 0;
        *sl = *r / (SMALL_BLOCK / MAX_SLI);
    }
    else
    {
        t = (1 << (flsl(*r) - 1 - MAX_LOG2_SLI)) - 1;
        *r = *r + t;
        *fl = flsl(*r) - 1;
        *sl = (*r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
        *fl -= FLI_OFFSET;
        /*if ((*fl -= FLI_OFFSET) < 0) // FL will be always >0!
         *fl = *sl = 0;
         */
        *r &= ~t;
    }
}
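/*
 * Worked example (assuming MAX_LOG2_SLI == 5, FLI_OFFSET == 6): for a
 * request of *r = 1000, t = (1 << (10 - 1 - 5)) - 1 = 15, so *r is first
 * bumped to 1015 and then masked back down to 1008; the resulting indexes
 * are *fl = 9 - 6 = 3 and *sl = (1015 >> 4) - 32 = 31, i.e. the list
 * holding free blocks of 1008..1023 bytes.
 */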

/**
 * Returns the indexes (fl, sl) of the free list into which a block of
 * size r should be inserted. Unlike MAPPING_SEARCH(), the size is not
 * rounded up.
 */
static inline void MAPPING_INSERT(unsigned long r, int *fl, int *sl)
{
    if ( r < SMALL_BLOCK )
    {
        *fl = 0;
        *sl = r / (SMALL_BLOCK / MAX_SLI);
    }
    else
    {
        *fl = flsl(r) - 1;
        *sl = (r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
        *fl -= FLI_OFFSET;
    }
}
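/*
 * For example, a free block of 1000 bytes is inserted at (fl, sl) =
 * (3, 30), the list covering 992..1007 bytes; because MAPPING_SEARCH()
 * rounds requests up to a class boundary, a search never lands on a list
 * whose blocks may be smaller than the request.
 */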

/**
 * Returns the first block from a list that holds blocks larger than or
 * equal to the one pointed to by the indexes (fl, sl)
 */
static inline struct bhdr *FIND_SUITABLE_BLOCK(struct xmem_pool *p, int *fl,
                                               int *sl)
{
    u32 tmp = p->sl_bitmap[*fl] & (~0u << *sl);
    struct bhdr *b = NULL;

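    /*
     * Try the remaining lists of the same first-level class first; if none
     * is populated, fall back to the lowest populated list in any higher
     * first-level class.
     */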
    if ( tmp )
    {
        *sl = ffs(tmp) - 1;
        b = p->matrix[*fl][*sl];
    }
    else
    {
        *fl = ffs(p->fl_bitmap & (~0u << (*fl + 1))) - 1;
        if ( likely(*fl > 0) )
        {
            *sl = ffs(p->sl_bitmap[*fl]) - 1;
            b = p->matrix[*fl][*sl];
        }
    }

    return b;
}

/**
 * Remove first free block(b) from free list with indexes (fl, sl).
 */
static inline void EXTRACT_BLOCK_HDR(struct bhdr *b, struct xmem_pool *p, int fl,
                                     int sl)
{
    p->matrix[fl][sl] = b->ptr.free_ptr.next;
    if ( p->matrix[fl][sl] )
    {
        p->matrix[fl][sl]->ptr.free_ptr.prev = NULL;
    }
    else
    {
        clear_bit(sl, &p->sl_bitmap[fl]);
        if ( !p->sl_bitmap[fl] )
            clear_bit(fl, &p->fl_bitmap);
    }
    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
}

#define POISON_BYTE 0xAA

/**
 * Removes block(b) from free list with indexes (fl, sl)
 */
static inline void EXTRACT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl,
                                 int sl)
{
    if ( b->ptr.free_ptr.next )
        b->ptr.free_ptr.next->ptr.free_ptr.prev =
            b->ptr.free_ptr.prev;
    if ( b->ptr.free_ptr.prev )
        b->ptr.free_ptr.prev->ptr.free_ptr.next =
            b->ptr.free_ptr.next;
    if ( p->matrix[fl][sl] == b )
    {
        p->matrix[fl][sl] = b->ptr.free_ptr.next;
        if ( !p->matrix[fl][sl] )
        {
            clear_bit(sl, &p->sl_bitmap[fl]);
            if ( !p->sl_bitmap[fl] )
                clear_bit(fl, &p->fl_bitmap);
        }
    }
    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};

#ifdef CONFIG_XMEM_POOL_POISON
    if ( (b->size & BLOCK_SIZE_MASK) > MIN_BLOCK_SIZE )
        ASSERT(!memchr_inv(b->ptr.buffer + MIN_BLOCK_SIZE, POISON_BYTE,
                           (b->size & BLOCK_SIZE_MASK) - MIN_BLOCK_SIZE));
#endif /* CONFIG_XMEM_POOL_POISON */
}

/**
 * Insert block(b) in free list with indexes (fl, sl)
 */
static inline void INSERT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl, int sl)
{
#ifdef CONFIG_XMEM_POOL_POISON
    if ( (b->size & BLOCK_SIZE_MASK) > MIN_BLOCK_SIZE )
        memset(b->ptr.buffer + MIN_BLOCK_SIZE, POISON_BYTE,
               (b->size & BLOCK_SIZE_MASK) - MIN_BLOCK_SIZE);
#endif /* CONFIG_XMEM_POOL_POISON */

    b->ptr.free_ptr = (struct free_ptr) {NULL, p->matrix[fl][sl]};
    if ( p->matrix[fl][sl] )
        p->matrix[fl][sl]->ptr.free_ptr.prev = b;
    p->matrix[fl][sl] = b;
    set_bit(sl, &p->sl_bitmap[fl]);
    set_bit(fl, &p->fl_bitmap);
}

/**
 * A region is a virtually contiguous chunk of memory; a pool is a
 * collection of such regions.
 */
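/*
 * Layout of a freshly added region (a minimal sketch of what ADD_REGION()
 * below sets up):
 *
 *   [ bhdr b | payload (region_size - 2 * BHDR_OVERHEAD) | sentinel bhdr ]
 *
 * The sentinel has size 0 and is marked USED so that coalescing and
 * region freeing stop at the end of the region.
 */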
static inline void ADD_REGION(void *region, unsigned long region_size,
                              struct xmem_pool *pool)
{
    int fl, sl;
    struct bhdr *b, *lb;

    b = (struct bhdr *)(region);
    b->prev_hdr = NULL;
    b->size = ROUNDDOWN_SIZE(region_size - 2 * BHDR_OVERHEAD)
        | FREE_BLOCK | PREV_USED;
    MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);
    INSERT_BLOCK(b, pool, fl, sl);
    /* The sentinel block: allows us to know when we're in the last block */
    lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    lb->prev_hdr = b;
    lb->size = 0 | USED_BLOCK | PREV_FREE;
    pool->used_size += BHDR_OVERHEAD; /* only sentinel block is "used" */
    pool->num_regions++;
}

/*
 * TLSF pool-based allocator start.
 */

struct xmem_pool *xmem_pool_create(
    const char *name,
    xmem_pool_get_memory get_mem,
    xmem_pool_put_memory put_mem,
    unsigned long max_size,
    unsigned long grow_size)
{
    struct xmem_pool *pool;
    int pool_bytes, pool_order;

    BUG_ON(max_size && (max_size < grow_size));

    pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
    pool_order = get_order_from_bytes(pool_bytes);

    pool = (void *)alloc_xenheap_pages(pool_order, 0);
    if ( pool == NULL )
        return NULL;
    memset(pool, 0, pool_bytes);

    /* Round to next page boundary */
    max_size = ROUNDUP_PAGE(max_size);
    grow_size = ROUNDUP_PAGE(grow_size);

    /* pool global overhead not included in used size */
    pool->used_size = 0;

    pool->max_size = max_size;
    pool->grow_size = grow_size;
    pool->get_mem = get_mem;
    pool->put_mem = put_mem;
    strlcpy(pool->name, name, sizeof(pool->name));

    spin_lock_init(&pool->lock);

    spin_lock(&pool_list_lock);
    list_add_tail(&pool->list, &pool_list_head);
    spin_unlock(&pool_list_lock);

    return pool;
}

unsigned long xmem_pool_get_used_size(struct xmem_pool *pool)
{
    return pool->used_size;
}

unsigned long xmem_pool_get_total_size(struct xmem_pool *pool)
{
    unsigned long total;
    total = ROUNDUP_SIZE(sizeof(*pool))
        + (pool->num_regions - 1) * pool->grow_size;
    return total;
}

void xmem_pool_destroy(struct xmem_pool *pool)
{
    int pool_bytes, pool_order;

    if ( pool == NULL )
        return;

    /* Check for memory leaks in this pool */
    if ( xmem_pool_get_used_size(pool) )
        printk("memory leak in pool: %s (%p). "
               "%lu bytes still in use.\n",
               pool->name, pool, xmem_pool_get_used_size(pool));

    spin_lock(&pool_list_lock);
    list_del_init(&pool->list);
    spin_unlock(&pool_list_lock);

    pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
    pool_order = get_order_from_bytes(pool_bytes);
    free_xenheap_pages(pool, pool_order);
}

void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool)
{
    struct bhdr *b, *b2, *next_b, *region;
    int fl, sl;
    unsigned long tmp_size;

    if ( size < MIN_BLOCK_SIZE )
        size = MIN_BLOCK_SIZE;
    else
    {
        tmp_size = ROUNDUP_SIZE(size);
        /* Guard against overflow. */
        if ( tmp_size < size )
            return NULL;
        size = tmp_size;
    }

    /* Rounding up the requested size and calculating fl and sl */

    spin_lock(&pool->lock);
 retry_find:
    MAPPING_SEARCH(&size, &fl, &sl);

    /* Searching a free block */
    if ( !(b = FIND_SUITABLE_BLOCK(pool, &fl, &sl)) )
    {
        /* Not found */
        if ( size > (pool->grow_size - 2 * BHDR_OVERHEAD) )
            goto out_locked;
        if ( pool->max_size && (pool->num_regions * pool->grow_size
                                > pool->max_size) )
            goto out_locked;
        spin_unlock(&pool->lock);
        if ( (region = pool->get_mem(pool->grow_size)) == NULL )
            goto out;
        spin_lock(&pool->lock);
        ADD_REGION(region, pool->grow_size, pool);
        goto retry_find;
    }
    EXTRACT_BLOCK_HDR(b, pool, fl, sl);

    /*-- found: */
    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    /* Should the block be split? */
    tmp_size = (b->size & BLOCK_SIZE_MASK) - size;
    if ( tmp_size >= sizeof(struct bhdr) )
    {
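        /*
         * The surplus can hold a block header plus at least MIN_BLOCK_SIZE
         * of payload (sizeof(struct bhdr) == BHDR_OVERHEAD + MIN_BLOCK_SIZE),
         * so split it off as a new free block instead of wasting it.
         */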
        tmp_size -= BHDR_OVERHEAD;
        b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);

        b2->size = tmp_size | FREE_BLOCK | PREV_USED;
        b2->prev_hdr = b;

        next_b->prev_hdr = b2;

        MAPPING_INSERT(tmp_size, &fl, &sl);
        INSERT_BLOCK(b2, pool, fl, sl);

        b->size = size | (b->size & PREV_STATE);
    }
    else
    {
        next_b->size &= (~PREV_FREE);
        b->size &= (~FREE_BLOCK); /* Now it's used */
    }

    pool->used_size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;

    spin_unlock(&pool->lock);
    return (void *)b->ptr.buffer;

    /* Failed alloc */
 out_locked:
    spin_unlock(&pool->lock);

 out:
    return NULL;
}

void xmem_pool_free(void *ptr, struct xmem_pool *pool)
{
    struct bhdr *b, *tmp_b;
    int fl = 0, sl = 0;

    if ( unlikely(ptr == NULL) )
        return;

    b = (struct bhdr *)((char *)ptr - BHDR_OVERHEAD);

    spin_lock(&pool->lock);
    b->size |= FREE_BLOCK;
    pool->used_size -= (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
    b->ptr.free_ptr = (struct free_ptr) { NULL, NULL};
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
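    /* If the physically following block is free, merge it into this one. */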
    if ( tmp_b->size & FREE_BLOCK )
    {
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, pool, fl, sl);
        b->size += (tmp_b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
    }
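    /* If the physically preceding block is free, merge this one into it. */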
    if ( b->size & PREV_FREE )
    {
        tmp_b = b->prev_hdr;
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, pool, fl, sl);
        tmp_b->size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
        b = tmp_b;
    }
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    tmp_b->prev_hdr = b;

    MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);

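    /*
     * If the coalesced block now spans the whole region (no physical
     * predecessor and the next header is the zero-sized sentinel), hand
     * the region back to the pool's memory provider.
     */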
    if ( (b->prev_hdr == NULL) && ((tmp_b->size & BLOCK_SIZE_MASK) == 0) )
    {
        pool->put_mem(b);
        pool->num_regions--;
        pool->used_size -= BHDR_OVERHEAD; /* sentinel block header */
        goto out;
    }

    INSERT_BLOCK(b, pool, fl, sl);

    tmp_b->size |= PREV_FREE;
    tmp_b->prev_hdr = b;
 out:
    spin_unlock(&pool->lock);
}

int xmem_pool_maxalloc(struct xmem_pool *pool)
{
    return pool->grow_size - (2 * BHDR_OVERHEAD);
}

/*
 * Glue for xmalloc().
 */

static struct xmem_pool *xenpool;

static void *xmalloc_pool_get(unsigned long size)
{
    ASSERT(size == PAGE_SIZE);
    return alloc_xenheap_page();
}

static void xmalloc_pool_put(void *p)
{
    free_xenheap_page(p);
}

static void *xmalloc_whole_pages(unsigned long size, unsigned long align)
{
    unsigned int i, order;
    void *res, *p;

    order = get_order_from_bytes(max(align, size));

    res = alloc_xenheap_pages(order, 0);
    if ( res == NULL )
        return NULL;

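    /*
     * alloc_xenheap_pages() returned a power-of-two run of pages that may
     * be larger than needed: free the unused tail back to the heap in
     * naturally aligned power-of-two chunks, so that only PFN_UP(size)
     * pages stay allocated.
     */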
    for ( p = res + PAGE_ALIGN(size), i = 0; i < order; ++i )
        if ( (unsigned long)p & (PAGE_SIZE << i) )
        {
            free_xenheap_pages(p, i);
            p += PAGE_SIZE << i;
        }

    PFN_ORDER(virt_to_page(res)) = PFN_UP(size);
    /* Check that there was no truncation: */
    ASSERT(PFN_ORDER(virt_to_page(res)) == PFN_UP(size));

    return res;
}

static void tlsf_init(void)
{
    xenpool = xmem_pool_create("xmalloc", xmalloc_pool_get,
                               xmalloc_pool_put, 0, PAGE_SIZE);
    BUG_ON(!xenpool);
}

/*
 * xmalloc()
 */

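/*
 * Alignment handling: _xmalloc() over-allocates by (align - MEM_ALIGN)
 * bytes; add_padding() then records any padding in front of the aligned
 * pointer as a fake block header marked FREE_BLOCK, and strip_padding()
 * walks back over such a header so xfree() can find the real block start.
 */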
static void *strip_padding(void *p)
{
    const struct bhdr *b = p - BHDR_OVERHEAD;

    if ( b->size & FREE_BLOCK )
    {
        p -= b->size & ~FREE_BLOCK;
        b = p - BHDR_OVERHEAD;
        ASSERT(!(b->size & FREE_BLOCK));
    }

    return p;
}

static void *add_padding(void *p, unsigned long align)
{
    unsigned int pad;

    if ( (pad = -(long)p & (align - 1)) != 0 )
    {
        void *q = p + pad;
        struct bhdr *b = q - BHDR_OVERHEAD;

        ASSERT(q > p);
        b->size = pad | FREE_BLOCK;
        p = q;
    }

    return p;
}

void *_xmalloc(unsigned long size, unsigned long align)
{
    void *p = NULL;

    ASSERT(!in_irq());

    if ( !size )
        return ZERO_BLOCK_PTR;

    ASSERT((align & (align - 1)) == 0);
    if ( align < MEM_ALIGN )
        align = MEM_ALIGN;
    size += align - MEM_ALIGN;

    /* Guard against overflow. */
    if ( size < align - MEM_ALIGN )
        return NULL;

    if ( !xenpool )
        tlsf_init();

    if ( size < PAGE_SIZE )
        p = xmem_pool_alloc(size, xenpool);
    if ( p == NULL )
        return xmalloc_whole_pages(size - align + MEM_ALIGN, align);

    /* Add alignment padding. */
    p = add_padding(p, align);

    ASSERT(((unsigned long)p & (align - 1)) == 0);
    return p;
}

void *_xzalloc(unsigned long size, unsigned long align)
{
    void *p = _xmalloc(size, align);

    return p ? memset(p, 0, size) : p;
}

void *_xrealloc(void *ptr, unsigned long size, unsigned long align)
{
    unsigned long curr_size;
    void *p;

    if ( !size )
    {
        xfree(ptr);
        return ZERO_BLOCK_PTR;
    }

    if ( ptr == NULL || ptr == ZERO_BLOCK_PTR )
        return _xmalloc(size, align);

    ASSERT(!(align & (align - 1)));
    if ( align < MEM_ALIGN )
        align = MEM_ALIGN;

    if ( !((unsigned long)ptr & (PAGE_SIZE - 1)) )
    {
        curr_size = (unsigned long)PFN_ORDER(virt_to_page(ptr)) << PAGE_SHIFT;

        if ( size <= curr_size && !((unsigned long)ptr & (align - 1)) )
            return ptr;
    }
    else
    {
        unsigned long tmp_size = size + align - MEM_ALIGN;
        const struct bhdr *b;

        /* Guard against overflow. */
        if ( tmp_size < size )
            return NULL;

        if ( tmp_size < PAGE_SIZE )
            tmp_size = (tmp_size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE :
                ROUNDUP_SIZE(tmp_size);

        /* Strip alignment padding. */
        p = strip_padding(ptr);

        b = p - BHDR_OVERHEAD;
        curr_size = b->size & BLOCK_SIZE_MASK;

        if ( tmp_size <= curr_size )
        {
            /* Add alignment padding. */
            p = add_padding(p, align);

            ASSERT(!((unsigned long)p & (align - 1)));

            return p;
        }
    }

    p = _xmalloc(size, align);
    if ( p )
    {
        memcpy(p, ptr, min(curr_size, size));
        xfree(ptr);
    }

    return p;
}

void xfree(void *p)
{
    if ( p == NULL || p == ZERO_BLOCK_PTR )
        return;

    ASSERT(!in_irq());

    if ( !((unsigned long)p & (PAGE_SIZE - 1)) )
    {
        unsigned long size = PFN_ORDER(virt_to_page(p));
        unsigned int i, order = get_order_from_pages(size);

        BUG_ON((unsigned long)p & ((PAGE_SIZE << order) - 1));
        PFN_ORDER(virt_to_page(p)) = 0;
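        /*
         * free_xenheap_pages() operates on naturally aligned power-of-two
         * extents, so release the PFN_UP()-sized run as one chunk per set
         * bit of its page count, working from the tail of the run back
         * towards p.
         */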
        for ( i = 0; ; ++i )
        {
            if ( !(size & (1 << i)) )
                continue;
            size -= 1 << i;
            free_xenheap_pages(p + (size << PAGE_SHIFT), i);
            if ( i + 1 >= order )
                return;
        }
    }

    /* Strip alignment padding. */
    p = strip_padding(p);

    xmem_pool_free(p, xenpool);
}