// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/kmemleak.h>
#include <linux/sizes.h>

#include "i915_buddy.h"

#include "i915_gem.h"
#include "i915_utils.h"

static struct kmem_cache *slab_blocks;

static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_mm *mm,
						 struct i915_buddy_block *parent,
						 unsigned int order,
						 u64 offset)
{
	struct i915_buddy_block *block;

	GEM_BUG_ON(order > I915_BUDDY_MAX_ORDER);

	block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
	if (!block)
		return NULL;

	block->header = offset;
	block->header |= order;
	block->parent = parent;

	GEM_BUG_ON(block->header & I915_BUDDY_HEADER_UNUSED);
	return block;
}

static void i915_block_free(struct i915_buddy_mm *mm,
			    struct i915_buddy_block *block)
{
	kmem_cache_free(slab_blocks, block);
}

static void mark_allocated(struct i915_buddy_block *block)
{
	block->header &= ~I915_BUDDY_HEADER_STATE;
	block->header |= I915_BUDDY_ALLOCATED;

	list_del(&block->link);
}

static void mark_free(struct i915_buddy_mm *mm,
		      struct i915_buddy_block *block)
{
	block->header &= ~I915_BUDDY_HEADER_STATE;
	block->header |= I915_BUDDY_FREE;

	list_add(&block->link,
		 &mm->free_list[i915_buddy_block_order(block)]);
}

static void mark_split(struct i915_buddy_block *block)
{
	block->header &= ~I915_BUDDY_HEADER_STATE;
	block->header |= I915_BUDDY_SPLIT;

	list_del(&block->link);
}

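/*
 * Initialise the allocator. The size is rounded down to a multiple of
 * chunk_size (which must be a power of two, no smaller than PAGE_SIZE), and
 * the managed range is carved into one free root block per set bit of the
 * rounded size.
 */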
int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	if (chunk_size < PAGE_SIZE)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	size = round_down(size, chunk_size);

	mm->size = size;
	mm->avail = size;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);

	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct i915_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
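	/*
	 * For example, a size of 4G + 64M with a 64M chunk_size yields two
	 * roots: a 4G block (order 6) followed by a 64M block (order 0).
	 */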
	do {
		struct i915_buddy_block *root;
		unsigned int order;
		u64 root_size;

		root_size = rounddown_pow_of_two(size);
		order = ilog2(root_size) - ilog2(chunk_size);

		root = i915_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		GEM_BUG_ON(i > mm->max_order);
		GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	while (i--)
		i915_block_free(mm, mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}

void i915_buddy_fini(struct i915_buddy_mm *mm)
{
	int i;

	for (i = 0; i < mm->n_roots; ++i) {
		GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
		i915_block_free(mm, mm->roots[i]);
	}

	GEM_WARN_ON(mm->avail != mm->size);

	kfree(mm->roots);
	kfree(mm->free_list);
}

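/*
 * Split a free block into its two halves at the next lower order, marking the
 * parent as split and both children as free.
 */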
static int split_block(struct i915_buddy_mm *mm,
		       struct i915_buddy_block *block)
{
	unsigned int block_order = i915_buddy_block_order(block) - 1;
	u64 offset = i915_buddy_block_offset(block);

	GEM_BUG_ON(!i915_buddy_block_is_free(block));
	GEM_BUG_ON(!i915_buddy_block_order(block));

	block->left = i915_block_alloc(mm, block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	block->right = i915_block_alloc(mm, block, block_order,
					offset + (mm->chunk_size << block_order));
	if (!block->right) {
		i915_block_free(mm, block->left);
		return -ENOMEM;
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	mark_split(block);

	return 0;
}

static struct i915_buddy_block *
get_buddy(struct i915_buddy_block *block)
{
	struct i915_buddy_block *parent;

	parent = block->parent;
	if (!parent)
		return NULL;

	if (parent->left == block)
		return parent->right;

	return parent->left;
}

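/*
 * Return a block to the free lists, merging with its buddy and walking up the
 * tree for as long as the buddy at each level is also free.
 */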
static void __i915_buddy_free(struct i915_buddy_mm *mm,
			      struct i915_buddy_block *block)
{
	struct i915_buddy_block *parent;

	while ((parent = block->parent)) {
		struct i915_buddy_block *buddy;

		buddy = get_buddy(block);

		if (!i915_buddy_block_is_free(buddy))
			break;

		list_del(&buddy->link);

		i915_block_free(mm, block);
		i915_block_free(mm, buddy);

		/* Both halves are free, so coalesce into the parent */
		block = parent;
	}

	mark_free(mm, block);
}

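/* Free an allocated block, returning its size to the available pool. */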
void i915_buddy_free(struct i915_buddy_mm *mm,
		     struct i915_buddy_block *block)
{
	GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
	mm->avail += i915_buddy_block_size(mm, block);
	__i915_buddy_free(mm, block);
}

void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
{
	struct i915_buddy_block *block, *on;

	list_for_each_entry_safe(block, on, objects, link) {
		i915_buddy_free(mm, block);
		cond_resched();
	}
	INIT_LIST_HEAD(objects);
}

/*
 * Allocate a power-of-two block. The order value here translates to:
 *
 *   0 = 2^0 * mm->chunk_size
 *   1 = 2^1 * mm->chunk_size
 *   2 = 2^2 * mm->chunk_size
 *   ...
 */
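/*
 * For example, with a 4KiB chunk_size, a 64KiB allocation would be order 4
 * (2^4 * 4KiB). A sketch, assuming the requested size is a power-of-two
 * multiple of mm->chunk_size:
 *
 *	block = i915_buddy_alloc(mm, ilog2(SZ_64K) - ilog2(mm->chunk_size));
 *	if (IS_ERR(block))
 *		return PTR_ERR(block);
 */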
struct i915_buddy_block *
i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
{
	struct i915_buddy_block *block = NULL;
	unsigned int i;
	int err;

	for (i = order; i <= mm->max_order; ++i) {
		block = list_first_entry_or_null(&mm->free_list[i],
						 struct i915_buddy_block,
						 link);
		if (block)
			break;
	}

	if (!block)
		return ERR_PTR(-ENOSPC);

	GEM_BUG_ON(!i915_buddy_block_is_free(block));

	while (i != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto out_free;

		/* Go low */
		block = block->left;
		i--;
	}

	mark_allocated(block);
	mm->avail -= i915_buddy_block_size(mm, block);
	kmemleak_update_trace(block);
	return block;

out_free:
	if (i != order)
		__i915_buddy_free(mm, block);
	return ERR_PTR(err);
}

/* Both helpers treat [s, e] as inclusive ranges. */
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= s2 && e1 >= e2;
}

/*
 * Allocate a specific range. Note that it's safe to chain together multiple
 * calls with the same blocks list, appending to it each time.
 *
 * Intended for pre-allocating portions of the address space, for example to
 * reserve a block for the initial framebuffer or similar. The expectation is
 * that i915_buddy_alloc() remains the main vehicle for allocations; if that's
 * not the case, the drm_mm range allocator is probably a much better fit.
 */
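/*
 * For example, reserving the first 1MiB of the managed range might look like
 * this sketch (assuming mm->chunk_size divides SZ_1M):
 *
 *	LIST_HEAD(reserved);
 *
 *	err = i915_buddy_alloc_range(mm, &reserved, 0, SZ_1M);
 *	if (err)
 *		return err;
 */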
int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
			   struct list_head *blocks,
			   u64 start, u64 size)
{
	struct i915_buddy_block *block;
	struct i915_buddy_block *buddy;
	LIST_HEAD(allocated);
	LIST_HEAD(dfs);
	u64 end;
	int err;
	int i;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (!IS_ALIGNED(size | start, mm->chunk_size))
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Depth-first search over the tree, starting from the roots */
	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	end = start + size - 1;

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct i915_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = i915_buddy_block_offset(block);
		block_end = block_start + i915_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (i915_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		if (contains(start, end, block_start, block_end)) {
			if (!i915_buddy_block_is_free(block)) {
				err = -ENOSPC;
				goto err_free;
			}

			mark_allocated(block);
			mm->avail -= i915_buddy_block_size(mm, block);
			list_add_tail(&block->link, &allocated);
			continue;
		}

		if (!i915_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	list_splice_tail(&allocated, blocks);
	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = get_buddy(block);
	if (buddy &&
	    (i915_buddy_block_is_free(block) &&
	     i915_buddy_block_is_free(buddy)))
		__i915_buddy_free(mm, block);

err_free:
	i915_buddy_free_list(mm, &allocated);
	return err;
}

void i915_buddy_block_print(struct i915_buddy_mm *mm,
			    struct i915_buddy_block *block,
			    struct drm_printer *p)
{
	u64 start = i915_buddy_block_offset(block);
	u64 size = i915_buddy_block_size(mm, block);

	drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}

void i915_buddy_print(struct i915_buddy_mm *mm, struct drm_printer *p)
{
	int order;

	drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
		   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);

	for (order = mm->max_order; order >= 0; order--) {
		struct i915_buddy_block *block;
		u64 count = 0, free;

		list_for_each_entry(block, &mm->free_list[order], link) {
			GEM_BUG_ON(!i915_buddy_block_is_free(block));
			count++;
		}

		drm_printf(p, "order-%d ", order);

		free = count * (mm->chunk_size << order);
		if (free < SZ_1M)
			drm_printf(p, "free: %lluKiB", free >> 10);
		else
			drm_printf(p, "free: %lluMiB", free >> 20);

		drm_printf(p, ", pages: %llu\n", count);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_buddy.c"
#endif

void i915_buddy_module_exit(void)
{
	kmem_cache_destroy(slab_blocks);
}

int __init i915_buddy_module_init(void)
{
	slab_blocks = KMEM_CACHE(i915_buddy_block, 0);
	if (!slab_blocks)
		return -ENOMEM;

	return 0;
}