// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/sched/mm.h>

#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"

struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	/*
	 * To avoid severe over-allocation when dealing with min_page_size
	 * restrictions, we override that behaviour here by allowing an object
	 * size and page layout which can be smaller. In practice this should be
	 * totally fine, since GTT paging structures are not typically inserted
	 * into the GTT.
	 *
	 * Note that we also hit this path for the scratch page, and for this
	 * case it might need to be 64K, but that should work fine here since we
	 * use the passed-in size for the page size, which should ensure it
	 * also has the same alignment.
	 */
	obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
						    vm->lmem_pt_obj_flags);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath, with the idea that one object_lock() will lock
	 * them all at once.
	 */
	if (!IS_ERR(obj)) {
		obj->base.resv = i915_vm_resv_get(vm);
		obj->shares_resv_from = vm;
	}

	return obj;
}

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	obj = i915_gem_object_create_internal(vm->i915, sz);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath, with the idea that one object_lock() will lock
	 * them all at once.
	 */
	if (!IS_ERR(obj)) {
		obj->base.resv = i915_vm_resv_get(vm);
		obj->shares_resv_from = vm;
	}

	return obj;
}

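/*
 * Pin and CPU-map the backing page of a paging structure, using a WC or WB
 * mapping depending on platform coherency, and mark the object unshrinkable
 * so the paging structure stays resident while the vm uses it.
 */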
int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
	enum i915_map_type type;
	void *vaddr;

	type = i915_coherent_map_type(vm->i915, obj, true);
	vaddr = i915_gem_object_pin_map_unlocked(obj, type);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}

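/*
 * As map_pt_dma(), but for callers that already hold the object lock (the
 * vm-wide dma-resv), hence i915_gem_object_pin_map() rather than the
 * _unlocked variant.
 */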
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
	enum i915_map_type type;
	void *vaddr;

	type = i915_coherent_map_type(vm->i915, obj, true);
	vaddr = i915_gem_object_pin_map(obj, type);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}

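/*
 * Called when the last "open" reference on the address space is dropped:
 * forcibly unbind any vma still left on the bound_list, taking a temporary
 * object reference so each vma survives its own unbind.
 */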
void __i915_vm_close(struct i915_address_space *vm)
{
	struct i915_vma *vma, *vn;

	if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
		return;

	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		/* Keep the obj (and hence the vma) alive as _we_ destroy it */
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
	}
	GEM_BUG_ON(!list_empty(&vm->bound_list));

	mutex_unlock(&vm->mutex);
}

/*
 * Lock the vm into the current ww transaction: since all paging structures
 * share one dma-resv, locking one object locks them all.
 */
int i915_vm_lock_objects(struct i915_address_space *vm,
			 struct i915_gem_ww_ctx *ww)
{
	if (vm->scratch[0]->base.resv == &vm->_resv) {
		return i915_gem_object_lock(vm->scratch[0], ww);
	} else {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

		/* We borrowed the scratch page from ggtt, take the top level object */
		return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
	}
}

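/* Final teardown of the drm_mm range manager and the vm mutex. */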
void i915_address_space_fini(struct i915_address_space *vm)
{
	drm_mm_takedown(&vm->mm);
	mutex_destroy(&vm->mutex);
}

/**
 * i915_vm_resv_release - Final struct i915_address_space destructor
 * @kref: Pointer to the &i915_address_space.resv_ref member.
 *
 * This function is called when the last lock sharer no longer shares the
 * &i915_address_space._resv lock.
 */
void i915_vm_resv_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, typeof(*vm), resv_ref);

	dma_resv_fini(&vm->_resv);
	kfree(vm);
}

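/*
 * Deferred release worker: runs the vm's cleanup callback, finalises the
 * address space and drops the shared dma-resv reference. Queued from
 * i915_vm_release() so the heavyweight teardown runs from the driver
 * workqueue rather than directly in the final kref_put().
 */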
static void __i915_vm_release(struct work_struct *work)
{
	struct i915_address_space *vm =
		container_of(work, struct i915_address_space, release_work);

	vm->cleanup(vm);
	i915_address_space_fini(vm);

	i915_vm_resv_put(vm);
}

void i915_vm_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, struct i915_address_space, ref);

	GEM_BUG_ON(i915_is_ggtt(vm));
	trace_i915_ppgtt_release(vm);

	queue_work(vm->i915->wq, &vm->release_work);
}

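/*
 * Common initialisation for the GGTT and ppGTT address spaces: reference
 * counts, the deferred release worker, the reclaim-aware vm mutex, the
 * shared dma-resv and the drm_mm range manager.
 */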
void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
	kref_init(&vm->ref);

	/*
	 * Special case for the GGTT, which has already done an early
	 * kref_init of resv_ref before this point.
	 */
	if (!kref_read(&vm->resv_ref))
		kref_init(&vm->resv_ref);

	INIT_WORK(&vm->release_work, __i915_vm_release);
	atomic_set(&vm->open, 1);

	/*
	 * The vm->mutex must be reclaim safe (for use in the shrinker).
	 * Do a dummy acquire now under fs_reclaim so that any allocation
	 * attempt holding the lock is immediately reported by lockdep.
	 */
	mutex_init(&vm->mutex);
	lockdep_set_subclass(&vm->mutex, subclass);

	if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
		i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
	} else {
		/*
		 * The CHV + BXT VT-d workarounds use stop_machine(),
		 * which is allowed to allocate memory. This means &vm->mutex
		 * is the outer lock, and in theory we can allocate memory
		 * inside it through stop_machine().
		 *
		 * Add the annotation for this; we use trylock in the shrinker.
		 */
		mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
		might_alloc(GFP_KERNEL);
		mutex_release(&vm->mutex.dep_map, _THIS_IP_);
	}
	dma_resv_init(&vm->_resv);

	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);
	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	INIT_LIST_HEAD(&vm->bound_list);
}

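/*
 * Release a vma's page description: free the sg_table if the vma owned a
 * private (e.g. partial or remapped) view, otherwise just drop the pointer
 * to the object's own backing pages, and reset the recorded page sizes.
 */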
void clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

	if (vma->pages != vma->obj->mm.pages) {
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}

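/*
 * Accessors for the single page backing a paging structure: its CPU virtual
 * address, its DMA address and its struct page.
 */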
void *__px_vaddr(struct drm_i915_gem_object *p)
{
	enum i915_map_type type;

	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return page_unpack_bits(p->mm.mapping, &type);
}

dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return sg_dma_address(p->mm.pages->sgl);
}

struct page *__px_page(struct drm_i915_gem_object *p)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return sg_page(p->mm.pages->sgl);
}

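/*
 * Fill a paging-structure page with @count copies of the 64-bit @val (e.g.
 * a scratch PTE) and flush the CPU cache range so the GPU sees the update.
 */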
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
	void *vaddr = __px_vaddr(p);

	memset64(vaddr, val, count);
	clflush_cache_range(vaddr, PAGE_SIZE);
}

static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
	void *vaddr = __px_vaddr(scratch);
	u8 val;

	val = 0;
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		val = POISON_FREE;

	memset(vaddr, val, scratch->base.size);
}

int setup_scratch_page(struct i915_address_space *vm)
{
	unsigned long size;

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
	 * scratch (read-only) between all vm, we create one 64K scratch page
	 * for all.
	 */
	size = I915_GTT_PAGE_SIZE_4K;
	if (i915_vm_is_4lvl(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K))
		size = I915_GTT_PAGE_SIZE_64K;

	do {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_pt_dma(vm, size);
		if (IS_ERR(obj))
			goto skip;

		if (map_pt_dma(vm, obj))
			goto skip_obj;

		/* We need a single contiguous page for our scratch */
		if (obj->mm.page_sizes.sg < size)
			goto skip_obj;

		/* And it needs to be correspondingly aligned */
		if (__px_dma(obj) & (size - 1))
			goto skip_obj;

		/*
		 * Use a non-zero scratch page for debugging.
		 *
		 * We want a value that should be reasonably obvious
		 * to spot in the error state, while also causing a GPU hang
		 * if executed. We prefer using a clear page in production, so
		 * should it ever be accidentally used, the effect should be
		 * fairly benign.
		 */
		poison_scratch_page(obj);

		vm->scratch[0] = obj;
		vm->scratch_order = get_order(size);
		return 0;

skip_obj:
		i915_gem_object_put(obj);
skip:
		if (size == I915_GTT_PAGE_SIZE_4K)
			return -ENOMEM;

		size = I915_GTT_PAGE_SIZE_4K;
	} while (1);
}

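/* Drop the scratch paging structures for every level of the vm. */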
void free_scratch(struct i915_address_space *vm)
{
	int i;

	for (i = 0; i <= vm->top; i++)
		i915_gem_object_put(vm->scratch[i]);
}

void gtt_write_workarounds(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

	/*
	 * This function is for GTT related workarounds. It is called on
	 * driver load and after a GPU reset, so workarounds can be placed
	 * here even if they get overwritten by a GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
	if (IS_BROADWELL(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_LP(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
	else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

	/*
	 * To support 64K PTEs we need to first enable the use of the
	 * Intermediate-Page-Size (IPS) bit of the PDE field via some magical
	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
	 * shouldn't be needed after GEN10.
	 *
	 * 64K pages were first introduced from BDW+, although technically they
	 * only *work* from gen9+. For pre-BDW we instead have the option for
	 * 32K pages, but we don't currently have any support for it in our
	 * driver.
	 */
	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
	    GRAPHICS_VER(i915) <= 10)
		intel_uncore_rmw(uncore,
				 GEN8_GAMW_ECO_DEV_RW_IA,
				 0,
				 GAMW_ECO_ENABLE_64K_IPS_FIELD);

	if (IS_GRAPHICS_VER(i915, 8, 11)) {
		bool can_use_gtt_cache = true;

		/*
		 * According to the BSpec, if we use 2M/1G pages then we also
		 * need to disable the GTT cache. At least on BDW we can see
		 * visual corruption when using 2M pages without disabling the
		 * GTT cache.
		 */
		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
			can_use_gtt_cache = false;

		/* WaGttCachingOffByDefault */
		intel_uncore_write(uncore,
				   HSW_GTT_CACHE_EN,
				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
		drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
				 intel_uncore_read(uncore,
						   HSW_GTT_CACHE_EN) == 0);
	}
}

static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
	/* TGL doesn't support LLC or AGE settings */
	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}

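/*
 * ICL programs one MMIO register per PAT index, unlike the packed
 * GEN8_PRIVATE_PAT_LO/HI pair used by the BDW/CHV paths below.
 */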
static void icl_setup_private_ppat(struct intel_uncore *uncore)
{
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(0),
			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(1),
			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(2),
			   GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(3),
			   GEN8_PPAT_UC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(4),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(5),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(6),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(7),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	u64 pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	/* for scanout with eLLC */
	if (GRAPHICS_VER(i915) >= 9)
		pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	else
		pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */

	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

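/* Select and program the PPAT layout appropriate for this platform. */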
void setup_private_pat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 8);

	if (GRAPHICS_VER(i915) >= 12)
		tgl_setup_private_ppat(uncore);
	else if (GRAPHICS_VER(i915) >= 11)
		icl_setup_private_ppat(uncore);
	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
		chv_setup_private_ppat(uncore);
	else
		bdw_setup_private_ppat(uncore);
}

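/*
 * Allocate a CPU-cached internal object of at least @size bytes and wrap it
 * in a vma for @vm, intended to be written by the GPU and read back by the
 * CPU (hence I915_CACHING_CACHED).
 */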
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	return vma;
}

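/*
 * As __vm_create_scratch_for_read(), but additionally pins the vma
 * (PIN_GLOBAL for a GGTT vma, PIN_USER otherwise) so it is ready for
 * immediate use.
 */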
struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
{
	struct i915_vma *vma;
	int err;

	vma = __vm_create_scratch_for_read(vm, size);
	if (IS_ERR(vma))
		return vma;

	err = i915_vma_pin(vma, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err) {
		i915_vma_put(vma);
		return ERR_PTR(err);
	}

	return vma;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif