#ifdef VMAP_VIRT_START
#include <xen/bitmap.h>
#include <xen/cache.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/pfn.h>
#include <xen/spinlock.h>
#include <xen/types.h>
#include <xen/vmap.h>
#include <asm/page.h>

static DEFINE_SPINLOCK(vm_lock);
static void *__read_mostly vm_base[VMAP_REGION_NR];
#define vm_bitmap(x) ((unsigned long *)vm_base[x])
/* highest allocated bit in the bitmap */
static unsigned int __read_mostly vm_top[VMAP_REGION_NR];
/* total number of bits in the bitmap */
static unsigned int __read_mostly vm_end[VMAP_REGION_NR];
/* lowest known clear bit in the bitmap */
static unsigned int vm_low[VMAP_REGION_NR];

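/*
 * Set up the given region: record its bounds and place the allocation
 * bitmap (one bit per page of the region) at the very start of the
 * region itself.  Only the bitmap pages needed immediately are backed
 * with memory; page tables for the remainder are populated so that
 * vm_alloc() can map further bitmap pages later on.  The bitmap's own
 * pages are marked allocated so they are never handed out.
 */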
void __init vm_init_type(enum vmap_region type, void *start, void *end)
{
    unsigned int i, nr;
    unsigned long va;

    ASSERT(!vm_base[type]);

    vm_base[type] = start;
    vm_end[type] = PFN_DOWN(end - start);
    vm_low[type] = PFN_UP((vm_end[type] + 7) / 8);
    nr = PFN_UP((vm_low[type] + 7) / 8);
    vm_top[type] = nr * PAGE_SIZE * 8;

    for ( i = 0, va = (unsigned long)vm_bitmap(type); i < nr;
          ++i, va += PAGE_SIZE )
    {
        struct page_info *pg = alloc_domheap_page(NULL, 0);

        map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR);
        clear_page((void *)va);
    }
    bitmap_fill(vm_bitmap(type), vm_low[type]);

    /* Populate page tables for the bitmap if necessary. */
    populate_pt_range(va, vm_low[type] - nr);
}

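/*
 * Reserve @nr consecutive page slots, aligned to @align pages (a
 * non-power-of-two is reduced to its lowest set bit), in region @t's
 * bitmap.  Each search starts one bit past the end of a set run, so
 * every allocation is preceded by an unallocated guard page.  When the
 * backed part of the bitmap is exhausted, a further bitmap page is
 * allocated and mapped, vm_top is advanced, and the search is retried.
 * Returns the virtual address of the reserved range, or NULL.
 */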
static void *vm_alloc(unsigned int nr, unsigned int align,
                      enum vmap_region t)
{
    unsigned int start, bit;

    if ( !align )
        align = 1;
    else if ( align & (align - 1) )
        align &= -align;

    ASSERT((t >= VMAP_DEFAULT) && (t < VMAP_REGION_NR));
    if ( !vm_base[t] )
        return NULL;

    spin_lock(&vm_lock);
    for ( ; ; )
    {
        struct page_info *pg;

        ASSERT(vm_low[t] == vm_top[t] || !test_bit(vm_low[t], vm_bitmap(t)));
        for ( start = vm_low[t]; start < vm_top[t]; )
        {
            bit = find_next_bit(vm_bitmap(t), vm_top[t], start + 1);
            if ( bit > vm_top[t] )
                bit = vm_top[t];
            /*
             * Note that this skips the first bit, making the
             * corresponding page a guard one.
             */
            start = (start + align) & ~(align - 1);
            if ( bit < vm_top[t] )
            {
                if ( start + nr < bit )
                    break;
                start = find_next_zero_bit(vm_bitmap(t), vm_top[t], bit + 1);
            }
            else
            {
                if ( start + nr <= bit )
                    break;
                start = bit;
            }
        }

        if ( start < vm_top[t] )
            break;

        spin_unlock(&vm_lock);

        if ( vm_top[t] >= vm_end[t] )
            return NULL;

        pg = alloc_domheap_page(NULL, 0);
        if ( !pg )
            return NULL;

        spin_lock(&vm_lock);

        if ( start >= vm_top[t] )
        {
            unsigned long va = (unsigned long)vm_bitmap(t) + vm_top[t] / 8;

            if ( !map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR) )
            {
                clear_page((void *)va);
                vm_top[t] += PAGE_SIZE * 8;
                if ( vm_top[t] > vm_end[t] )
                    vm_top[t] = vm_end[t];
                continue;
            }
        }

        free_domheap_page(pg);

        if ( start >= vm_top[t] )
        {
            spin_unlock(&vm_lock);
            return NULL;
        }
    }

    for ( bit = start; bit < start + nr; ++bit )
        __set_bit(bit, vm_bitmap(t));
    if ( bit < vm_top[t] )
        ASSERT(!test_bit(bit, vm_bitmap(t)));
    else
        ASSERT(bit == vm_top[t]);
    if ( start <= vm_low[t] + 2 )
        vm_low[t] = bit;
    spin_unlock(&vm_lock);

    return vm_base[t] + start * PAGE_SIZE;
}

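/*
 * Translate @va to its page index within region @type.  Returns the
 * index only if @va points at the first page of a live allocation,
 * i.e. its bit is set while the preceding (guard page) bit is clear;
 * returns 0 (never a valid index, as the bitmap occupies the start of
 * the region) otherwise.
 */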
static unsigned int vm_index(const void *va, enum vmap_region type)
{
    unsigned long addr = (unsigned long)va & ~(PAGE_SIZE - 1);
    unsigned int idx;
    unsigned long start = (unsigned long)vm_base[type];

    if ( !start )
        return 0;

    if ( addr < start + (vm_end[type] / 8) ||
         addr >= start + vm_top[type] * PAGE_SIZE )
        return 0;

    idx = PFN_DOWN(va - vm_base[type]);
    return !test_bit(idx - 1, vm_bitmap(type)) &&
           test_bit(idx, vm_bitmap(type)) ? idx : 0;
}

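/*
 * Return the size, in pages, of the allocation starting at @va, or 0 if
 * @va doesn't point at the start of one.
 */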
static unsigned int vm_size(const void *va, enum vmap_region type)
{
    unsigned int start = vm_index(va, type), end;

    if ( !start )
        return 0;

    end = find_next_zero_bit(vm_bitmap(type), vm_top[type], start + 1);

    return min(end, vm_top[type]) - start;
}

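/*
 * Release the allocation starting at @va: clear its run of set bits
 * and, if it sits below vm_low, pull vm_low back to the start of the
 * now-clear run so subsequent searches find the freed space.
 */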
static void vm_free(const void *va)
{
    enum vmap_region type = VMAP_DEFAULT;
    unsigned int bit = vm_index(va, type);

    if ( !bit )
    {
        type = VMAP_XEN;
        bit = vm_index(va, type);
    }

    if ( !bit )
    {
        WARN_ON(va != NULL);
        return;
    }

    spin_lock(&vm_lock);
    if ( bit < vm_low[type] )
    {
        vm_low[type] = bit - 1;
        while ( !test_bit(vm_low[type] - 1, vm_bitmap(type)) )
            --vm_low[type];
    }
    while ( __test_and_clear_bit(bit, vm_bitmap(type)) )
        if ( ++bit == vm_top[type] )
            break;
    spin_unlock(&vm_lock);
}

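/*
 * Map @nr ranges of @granularity pages each, described by @mfn, at
 * consecutive virtual addresses in region @type.  If any individual
 * mapping fails, everything mapped so far is torn down again and NULL
 * is returned.
 */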
void *__vmap(const mfn_t *mfn, unsigned int granularity,
             unsigned int nr, unsigned int align, unsigned int flags,
             enum vmap_region type)
{
    void *va = vm_alloc(nr * granularity, align, type);
    unsigned long cur = (unsigned long)va;

    for ( ; va && nr--; ++mfn, cur += PAGE_SIZE * granularity )
    {
        if ( map_pages_to_xen(cur, *mfn, granularity, flags) )
        {
            vunmap(va);
            va = NULL;
        }
    }

    return va;
}

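/*
 * Map @nr individual MFNs at consecutive virtual addresses with default
 * hypervisor permissions.  Illustrative use (hypothetical caller):
 *
 *     void *p = vmap(mfns, 2);
 *     if ( p ) { ... use the mapping ...; vunmap(p); }
 */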
void *vmap(const mfn_t *mfn, unsigned int nr)
{
    return __vmap(mfn, 1, nr, 1, PAGE_HYPERVISOR, VMAP_DEFAULT);
}

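/*
 * Undo vmap(): remove the translations covering the area (or, where
 * _PAGE_NONE is available, replace them with non-present entries so
 * intermediate page tables are preserved) and release the address range.
 */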
void vunmap(const void *va)
{
    unsigned long addr = (unsigned long)va;
    unsigned int pages = vm_size(va, VMAP_DEFAULT);

    if ( !pages )
        pages = vm_size(va, VMAP_XEN);

#ifndef _PAGE_NONE
    destroy_xen_mappings(addr, addr + PAGE_SIZE * pages);
#else /* Avoid tearing down intermediate page tables. */
    map_pages_to_xen(addr, INVALID_MFN, pages, _PAGE_NONE);
#endif
    vm_free(va);
}

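/*
 * Back @size bytes of virtually contiguous address space in region
 * @type with individually allocated domheap pages.  On failure, all
 * pages obtained so far are freed again.
 */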
static void *vmalloc_type(size_t size, enum vmap_region type)
{
    mfn_t *mfn;
    size_t pages, i;
    struct page_info *pg;
    void *va;

    ASSERT(size);

    pages = PFN_UP(size);
    mfn = xmalloc_array(mfn_t, pages);
    if ( mfn == NULL )
        return NULL;

    for ( i = 0; i < pages; i++ )
    {
        pg = alloc_domheap_page(NULL, 0);
        if ( pg == NULL )
            goto error;
        mfn[i] = page_to_mfn(pg);
    }

    va = __vmap(mfn, 1, pages, 1, PAGE_HYPERVISOR, type);
    if ( va == NULL )
        goto error;

    xfree(mfn);
    return va;

 error:
    while ( i-- )
        free_domheap_page(mfn_to_page(mfn[i]));
    xfree(mfn);
    return NULL;
}

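/* vmalloc_type() on the default region. */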
void *vmalloc(size_t size)
{
    return vmalloc_type(size, VMAP_DEFAULT);
}

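/* vmalloc_type() on the VMAP_XEN region. */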
void *vmalloc_xen(size_t size)
{
    return vmalloc_type(size, VMAP_XEN);
}

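/* As vmalloc(), but with the allocated memory cleared. */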
void *vzalloc(size_t size)
{
    void *p = vmalloc_type(size, VMAP_DEFAULT);
    size_t i;

    if ( p == NULL )
        return NULL;

    for ( i = 0; i < size; i += PAGE_SIZE )
        clear_page(p + i);

    return p;
}

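/*
 * Undo vmalloc(): look up the backing pages through their still-present
 * mappings, unmap the range, then return the pages to the domheap.
 */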
void vfree(void *va)
{
    unsigned int i, pages;
    struct page_info *pg;
    PAGE_LIST_HEAD(pg_list);
    enum vmap_region type = VMAP_DEFAULT;

    if ( !va )
        return;

    pages = vm_size(va, type);
    if ( !pages )
    {
        type = VMAP_XEN;
        pages = vm_size(va, type);
    }
    ASSERT(pages);

    for ( i = 0; i < pages; i++ )
    {
        struct page_info *page = vmap_to_page(va + i * PAGE_SIZE);

        ASSERT(page);
        page_list_add(page, &pg_list);
    }
    vunmap(va);

    while ( (pg = page_list_remove_head(&pg_list)) != NULL )
        free_domheap_page(pg);
}
#endif