/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

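/*
 * Install freshly acquired backing pages on the object: flush them for GPU
 * coherency if needed, reset the cached sg iterators, record which GTT page
 * sizes the sg layout can support, and place the object on the shrinker
 * lists where appropriate.
 */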
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	bool shrinkable;
	int i;

	assert_object_held_shared(obj);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	shrinkable = i915_gem_object_is_shrinkable(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		GEM_BUG_ON(!list_empty(&obj->mm.link));
		atomic_inc(&obj->mm.shrink_pin);
		shrinkable = false;
	}

	if (shrinkable) {
		struct list_head *list;
		unsigned long flags;

		assert_object_held(obj);
		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

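/*
 * Low-level helper to ask the backend for its pages; the caller must hold
 * the object lock (at least shared) and must not request pages for a
 * purgeable object.
 */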
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	assert_object_held_shared(obj);

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	assert_object_held(obj);

	assert_object_held_shared(obj);

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			return err;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

	return 0;
}

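/*
 * Take the object lock under a local ww context (backing off and retrying
 * on -EDEADLK) and pin the object's pages.
 */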
int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	assert_object_held_shared(obj);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

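/*
 * Drop every entry cached in the get_page/get_dma_page lookup radix trees;
 * called when the backing pages are released.
 */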
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

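/*
 * Only mappings created with vmap()/vmap_pfn() need an explicit vunmap();
 * single lowmem pages are mapped via page_address() and need no teardown.
 */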
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

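/*
 * Detach the sg_table from the object: restore the madvise state of a
 * volatile object, drop any kernel mapping and cached sg lookups, and hand
 * the pages back to the caller for release.
 */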
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	assert_object_held_shared(obj);

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

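/*
 * Release the object's pages back to the backend, failing with -EBUSY while
 * the pages are still pinned.
 */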
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	assert_object_held_shared(obj);

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

	return 0;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTE (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

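/*
 * As above, but build the kernel mapping from the device addresses of an
 * iomem-backed object; only write-combined mappings are supported here.
 */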
static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	GEM_BUG_ON(type != I915_MAP_WC);

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (!i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return ERR_PTR(-ENXIO);

	assert_object_held(obj);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				return ERR_PTR(err);

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	/*
	 * For discrete, our CPU mappings need to be consistent in order to
	 * function correctly on !x86. When mapping things through TTM, we use
	 * the same rules to determine the caching type.
	 *
	 * The caching rules, starting from DG1:
	 *
	 *	- If the object can be placed in device local-memory, then the
	 *	  pages should be allocated and mapped as write-combined only.
	 *
	 *	- Everything else is always allocated and mapped as write-back,
	 *	  with the guarantee that everything is also coherent with the
	 *	  GPU.
	 *
	 * Internal users of lmem are already expected to get this right, so no
	 * fudging needed there.
	 */
	if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
		if (type != I915_MAP_WC && !obj->mm.n_placements) {
			ptr = ERR_PTR(-ENODEV);
			goto err_unpin;
		}

		type = I915_MAP_WC;
	} else if (IS_DGFX(to_i915(obj->base.dev))) {
		type = I915_MAP_WB;
	}

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ptr = ERR_PTR(-EBUSY);
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		if (GEM_WARN_ON(type == I915_MAP_WC &&
				!static_cpu_has(X86_FEATURE_PAT)))
			ptr = ERR_PTR(-ENODEV);
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (IS_ERR(ptr))
			goto err_unpin;

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
	return ptr;
}

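/*
 * Convenience wrapper that takes and releases the object lock around
 * i915_gem_object_pin_map().
 */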
void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}

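/*
 * Make CPU writes within the given range of a pinned kernel mapping visible
 * to the GPU: issue a write barrier, then clflush the range unless the
 * object is already coherent for writes or the mapping is write-combined.
 */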
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset,
			 bool dma)
{
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	if (!i915_gem_object_has_pinned_pages(obj))
		assert_object_held(obj);

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

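/*
 * Return the struct page backing the n'th page of the object; only valid
 * for objects with struct-page backing (i.e. not iomem).
 */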
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

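/*
 * Return the DMA address of the n'th page, optionally reporting how many
 * bytes remain in its sg entry from that offset.
 */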
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}