/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_vgpu.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that their system has less memory
 * available than they put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

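/*
 * Allocate a node from the stolen drm_mm, restricted to [start, end), under
 * the stolen_lock. Fails with -ENODEV if the stolen allocator was never
 * initialised (no stolen memory, or its use has been disabled).
 */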
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
                                         struct drm_mm_node *node, u64 size,
                                         unsigned alignment, u64 start, u64 end)
{
        int ret;

        if (!drm_mm_initialized(&i915->mm.stolen))
                return -ENODEV;

        /* WaSkipStolenMemoryFirstPage:bdw+ */
        if (GRAPHICS_VER(i915) >= 8 && start < 4096)
                start = 4096;

        mutex_lock(&i915->mm.stolen_lock);
        ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
                                          size, alignment, 0,
                                          start, end, DRM_MM_INSERT_BEST);
        mutex_unlock(&i915->mm.stolen_lock);

        return ret;
}

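/*
 * Unrestricted allocation from stolen, except that we never hand out
 * anything below I915_GEM_STOLEN_BIAS, leaving the bottom of stolen alone.
 */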
int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
                                struct drm_mm_node *node, u64 size,
                                unsigned alignment)
{
        return i915_gem_stolen_insert_node_in_range(i915, node,
                                                    size, alignment,
                                                    I915_GEM_STOLEN_BIAS,
                                                    U64_MAX);
}

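/* Give a node back to the stolen allocator, under the stolen_lock. */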
void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
                                 struct drm_mm_node *node)
{
        mutex_lock(&i915->mm.stolen_lock);
        drm_mm_remove_node(node);
        mutex_unlock(&i915->mm.stolen_lock);
}

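/*
 * Sanitize the BIOS-provided stolen range: on old integrated parts the GTT
 * may live inside stolen, in which case we trim stolen down to the larger
 * chunk on either side of it; for system-memory stolen we also reserve the
 * range so nothing else in the kernel can claim it.
 */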
static int i915_adjust_stolen(struct drm_i915_private *i915,
                              struct resource *dsm)
{
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct intel_uncore *uncore = ggtt->vm.gt->uncore;
        struct resource *r;

        if (dsm->start == 0 || dsm->end <= dsm->start)
                return -EINVAL;

        /*
         * TODO: We have yet to encounter the case where the GTT wasn't at the
         * end of stolen. With that assumption we could simplify this.
         */

        /* Make sure we don't clobber the GTT if it's within stolen memory */
        if (GRAPHICS_VER(i915) <= 4 &&
            !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
                struct resource stolen[2] = {*dsm, *dsm};
                struct resource ggtt_res;
                resource_size_t ggtt_start;

                ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
                if (GRAPHICS_VER(i915) == 4)
                        ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
                                     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
                else
                        ggtt_start &= PGTBL_ADDRESS_LO_MASK;

                ggtt_res =
                        (struct resource) DEFINE_RES_MEM(ggtt_start,
                                                         ggtt_total_entries(ggtt) * 4);

                if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
                        stolen[0].end = ggtt_res.start;
                if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
                        stolen[1].start = ggtt_res.end;

                /* Pick the larger of the two chunks */
                if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
                        *dsm = stolen[0];
                else
                        *dsm = stolen[1];

                if (stolen[0].start != stolen[1].start ||
                    stolen[0].end != stolen[1].end) {
                        drm_dbg(&i915->drm,
                                "GTT within stolen memory at %pR\n",
                                &ggtt_res);
                        drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
                                dsm);
                }
        }

        /*
         * With stolen lmem, we don't need to check if the address range
         * overlaps with the non-stolen system memory range, since lmem is local
         * to the gpu.
         */
        if (HAS_LMEM(i915))
                return 0;

        /*
         * Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel. So if the region is already marked as busy, something
         * is seriously wrong.
         */
        r = devm_request_mem_region(i915->drm.dev, dsm->start,
                                    resource_size(dsm),
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                /*
                 * One more attempt but this time requesting region from
                 * start + 1, as we have seen that this resolves the region
                 * conflict with the PCI Bus.
                 * This is a BIOS w/a: Some BIOS wrap stolen in the root
                 * PCI bus, but have an off-by-one error. Hence retry the
                 * reservation starting from 1 instead of 0.
                 * There's also BIOS with off-by-one on the other end.
                 */
                r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
                                            resource_size(dsm) - 2,
                                            "Graphics Stolen Memory");
                /*
                 * GEN3 firmware likes to smash pci bridges into the stolen
                 * range. Apparently this works.
                 */
                if (!r && GRAPHICS_VER(i915) != 3) {
                        drm_err(&i915->drm,
                                "conflict detected with stolen region: %pR\n",
                                dsm);

                        return -EBUSY;
                }
        }

        return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
        if (!drm_mm_initialized(&i915->mm.stolen))
                return;

        drm_mm_takedown(&i915->mm.stolen);
}

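/*
 * The *_get_stolen_reserved() helpers decode the per-generation
 * *_STOLEN_RESERVED register into the base and size of the slice of stolen
 * that the hardware keeps for its own use. They leave base/size untouched
 * when the reservation is disabled or looks bogus.
 */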
static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
                                    struct intel_uncore *uncore,
                                    resource_size_t *base,
                                    resource_size_t *size)
{
        u32 reg_val = intel_uncore_read(uncore,
                                        IS_GM45(i915) ?
                                        CTG_STOLEN_RESERVED :
                                        ELK_STOLEN_RESERVED);
        resource_size_t stolen_top = i915->dsm.end + 1;

        drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
                IS_GM45(i915) ? "CTG" : "ELK", reg_val);

        if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
                return;

        /*
         * Whether ILK really reuses the ELK register for this is unclear.
         * Let's see if we catch anyone with this supposedly enabled on ILK.
         */
        drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
                 "ILK stolen reserved found? 0x%08x\n",
                 reg_val);

        if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
                return;

        *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
        drm_WARN_ON(&i915->drm,
                    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

        *size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
                                     struct intel_uncore *uncore,
                                     resource_size_t *base,
                                     resource_size_t *size)
{
        u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

        drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

        if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
                return;

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
        case GEN6_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_512K:
                *size = 512 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_256K:
                *size = 256 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_128K:
                *size = 128 * 1024;
                break;
        default:
                *size = 1024 * 1024;
                MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
                                    struct intel_uncore *uncore,
                                    resource_size_t *base,
                                    resource_size_t *size)
{
        u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
        resource_size_t stolen_top = i915->dsm.end + 1;

        drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

        if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
                return;

        switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
        default:
                MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
                fallthrough;
        case GEN7_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        }

        /*
         * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
         * reserved location as (top - size).
         */
        *base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
                                     struct intel_uncore *uncore,
                                     resource_size_t *base,
                                     resource_size_t *size)
{
        u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

        drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

        if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
                return;

        *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
        case GEN7_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN7_STOLEN_RESERVED_256K:
                *size = 256 * 1024;
                break;
        default:
                *size = 1024 * 1024;
                MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
                                    struct intel_uncore *uncore,
                                    resource_size_t *base,
                                    resource_size_t *size)
{
        u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

        drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

        if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
                return;

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
        case GEN8_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_2M:
                *size = 2 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_4M:
                *size = 4 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_8M:
                *size = 8 * 1024 * 1024;
                break;
        default:
                *size = 8 * 1024 * 1024;
                MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
                                    struct intel_uncore *uncore,
                                    resource_size_t *base,
                                    resource_size_t *size)
{
        u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
        resource_size_t stolen_top = i915->dsm.end + 1;

        drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

        if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
                return;

        if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
                return;

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
        *size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
                                    struct intel_uncore *uncore,
                                    resource_size_t *base,
                                    resource_size_t *size)
{
        u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

        drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

        *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
        case GEN8_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_2M:
                *size = 2 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_4M:
                *size = 4 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_8M:
                *size = 8 * 1024 * 1024;
                break;
        default:
                *size = 8 * 1024 * 1024;
                MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
        }
}

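/*
 * Common setup for a stolen region: adopt the region into i915->dsm, adjust
 * it, carve out the hardware-reserved slice at the top and initialise the
 * drm_mm allocator over whatever remains usable. Note that stolen is
 * quietly disabled (we still return 0) under iGVT-g, or with an active
 * IOMMU on older hardware.
 */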
static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
        struct drm_i915_private *i915 = mem->i915;
        struct intel_uncore *uncore = &i915->uncore;
        resource_size_t reserved_base, stolen_top;
        resource_size_t reserved_total, reserved_size;

        mutex_init(&i915->mm.stolen_lock);

        if (intel_vgpu_active(i915)) {
                drm_notice(&i915->drm,
                           "%s, disabling use of stolen memory\n",
                           "iGVT-g active");
                return 0;
        }

        if (intel_vtd_active() && GRAPHICS_VER(i915) < 8) {
                drm_notice(&i915->drm,
                           "%s, disabling use of stolen memory\n",
                           "DMAR active");
                return 0;
        }

        if (resource_size(&mem->region) == 0)
                return 0;

        i915->dsm = mem->region;

        if (i915_adjust_stolen(i915, &i915->dsm))
                return 0;

        GEM_BUG_ON(i915->dsm.start == 0);
        GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);

        stolen_top = i915->dsm.end + 1;
        reserved_base = stolen_top;
        reserved_size = 0;

        switch (GRAPHICS_VER(i915)) {
        case 2:
        case 3:
                break;
        case 4:
                if (!IS_G4X(i915))
                        break;
                fallthrough;
        case 5:
                g4x_get_stolen_reserved(i915, uncore,
                                        &reserved_base, &reserved_size);
                break;
        case 6:
                gen6_get_stolen_reserved(i915, uncore,
                                         &reserved_base, &reserved_size);
                break;
        case 7:
                if (IS_VALLEYVIEW(i915))
                        vlv_get_stolen_reserved(i915, uncore,
                                                &reserved_base, &reserved_size);
                else
                        gen7_get_stolen_reserved(i915, uncore,
                                                 &reserved_base, &reserved_size);
                break;
        case 8:
        case 9:
                if (IS_LP(i915))
                        chv_get_stolen_reserved(i915, uncore,
                                                &reserved_base, &reserved_size);
                else
                        bdw_get_stolen_reserved(i915, uncore,
                                                &reserved_base, &reserved_size);
                break;
        default:
                MISSING_CASE(GRAPHICS_VER(i915));
                fallthrough;
        case 11:
        case 12:
                icl_get_stolen_reserved(i915, uncore,
                                        &reserved_base,
                                        &reserved_size);
                break;
        }

        /*
         * Our expectation is that the reserved space is at the top of the
         * stolen region and *never* at the bottom. If we see !reserved_base,
         * it likely means we failed to read the registers correctly.
         */
        if (!reserved_base) {
                drm_err(&i915->drm,
                        "inconsistent reservation %pa + %pa; ignoring\n",
                        &reserved_base, &reserved_size);
                reserved_base = stolen_top;
                reserved_size = 0;
        }

        i915->dsm_reserved =
                (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

        if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
                drm_err(&i915->drm,
                        "Stolen reserved area %pR outside stolen memory %pR\n",
                        &i915->dsm_reserved, &i915->dsm);
                return 0;
        }

        /*
         * It is possible for the reserved area to end before the end of
         * stolen memory, so just consider the start.
         */
        reserved_total = stolen_top - reserved_base;

        drm_dbg(&i915->drm,
                "Memory reserved for graphics device: %lluK, usable: %lluK\n",
                (u64)resource_size(&i915->dsm) >> 10,
                ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);

        i915->stolen_usable_size =
                resource_size(&i915->dsm) - reserved_total;

        /* Basic memrange allocator for stolen space. */
        drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

        return 0;
}

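/*
 * Debug-only (CONFIG_DRM_I915_DEBUG_GEM) helper: scribble a poison byte
 * over a stolen range, one page at a time, by temporarily binding each page
 * into the GGTT error-capture scratch slot. Used to make stale reuse of
 * stolen pages visible.
 */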
static void dbg_poison(struct i915_ggtt *ggtt,
                       dma_addr_t addr, resource_size_t size,
                       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
        if (!drm_mm_node_allocated(&ggtt->error_capture))
                return;

        if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
                return; /* beware stop_machine() inversion */

        GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

        mutex_lock(&ggtt->error_mutex);
        while (size) {
                void __iomem *s;

                ggtt->vm.insert_page(&ggtt->vm, addr,
                                     ggtt->error_capture.start,
                                     I915_CACHE_NONE, 0);
                mb();

                s = io_mapping_map_wc(&ggtt->iomap,
                                      ggtt->error_capture.start,
                                      PAGE_SIZE);
                memset_io(s, x, PAGE_SIZE);
                io_mapping_unmap(s);

                addr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }
        mb();
        ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
        mutex_unlock(&ggtt->error_mutex);
#endif
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
                             resource_size_t offset, resource_size_t size)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct sg_table *st;
        struct scatterlist *sg;

        GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

        /*
         * We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
         * dma mapping in a single scatterlist.
         */

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return ERR_PTR(-ENOMEM);

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return ERR_PTR(-ENOMEM);
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = size;

        sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
        sg_dma_len(sg) = size;

        return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages =
                i915_pages_create_for_stolen(obj->base.dev,
                                             obj->stolen->start,
                                             obj->stolen->size);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        dbg_poison(&to_i915(obj->base.dev)->ggtt,
                   sg_dma_address(pages->sgl),
                   sg_dma_len(pages->sgl),
                   POISON_INUSE);

        __i915_gem_object_set_pages(obj, pages, obj->stolen->size);

        return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
{
        /* Should only be called from i915_gem_object_release_stolen() */

        dbg_poison(&to_i915(obj->base.dev)->ggtt,
                   sg_dma_address(pages->sgl),
                   sg_dma_len(pages->sgl),
                   POISON_FREE);

        sg_free_table(pages);
        kfree(pages);
}

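/*
 * Final release: return the drm_mm node to the stolen allocator and detach
 * the object from its memory region.
 */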
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

        GEM_BUG_ON(!stolen);
        i915_gem_stolen_remove_node(i915, stolen);
        kfree(stolen);

        i915_gem_object_release_memory_region(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .name = "i915_gem_object_stolen",
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
        .release = i915_gem_object_release_stolen,
};

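/*
 * Wrap an already-reserved stolen node in a GEM object: stolen objects are
 * always contiguous, get a cache level based on LLC presence, and have
 * their backing store pinned up front.
 */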
static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
                                           struct drm_i915_gem_object *obj,
                                           struct drm_mm_node *stolen)
{
        static struct lock_class_key lock_class;
        unsigned int cache_level;
        unsigned int flags;
        int err;

        /*
         * Stolen objects are always physically contiguous since we just
         * allocate one big block underneath using the drm_mm range allocator.
         */
        flags = I915_BO_ALLOC_CONTIGUOUS;

        drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

        obj->stolen = stolen;
        obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
        cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
        i915_gem_object_set_cache_coherency(obj, cache_level);

        if (WARN_ON(!i915_gem_object_trylock(obj)))
                return -EBUSY;

        i915_gem_object_init_memory_region(obj, mem);

        err = i915_gem_object_pin_pages(obj);
        if (err)
                i915_gem_object_release_memory_region(obj);
        i915_gem_object_unlock(obj);

        return err;
}

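/*
 * intel_memory_region_ops.init_object() hook: carve @size bytes out of the
 * stolen allocator and build a GEM object on top, unwinding the node on
 * failure.
 */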
static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
                                        struct drm_i915_gem_object *obj,
                                        resource_size_t size,
                                        resource_size_t page_size,
                                        unsigned int flags)
{
        struct drm_i915_private *i915 = mem->i915;
        struct drm_mm_node *stolen;
        int ret;

        if (!drm_mm_initialized(&i915->mm.stolen))
                return -ENODEV;

        if (size == 0)
                return -EINVAL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return -ENOMEM;

        ret = i915_gem_stolen_insert_node(i915, stolen, size,
                                          mem->min_page_size);
        if (ret)
                goto err_free;

        ret = __i915_gem_object_create_stolen(mem, obj, stolen);
        if (ret)
                goto err_remove;

        return 0;

err_remove:
        i915_gem_stolen_remove_node(i915, stolen);
err_free:
        kfree(stolen);
        return ret;
}

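/* Allocate a fresh, allocator-placed object from the probed stolen region. */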
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
                              resource_size_t size)
{
        return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}

static int init_stolen_smem(struct intel_memory_region *mem)
{
        /*
         * Initialise stolen early so that we may reserve preallocated
         * objects for the BIOS to KMS transition.
         */
        return i915_gem_init_stolen(mem);
}

static void release_stolen_smem(struct intel_memory_region *mem)
{
        i915_gem_cleanup_stolen(mem->i915);
}

static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
        .init = init_stolen_smem,
        .release = release_stolen_smem,
        .init_object = _i915_gem_object_stolen_init,
};

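/*
 * Stolen local memory additionally needs a WC iomapping of its portion of
 * the LMEM BAR before the common stolen setup can run.
 */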
static int init_stolen_lmem(struct intel_memory_region *mem)
{
        int err;

        if (GEM_WARN_ON(resource_size(&mem->region) == 0))
                return -ENODEV;

        if (!io_mapping_init_wc(&mem->iomap,
                                mem->io_start,
                                resource_size(&mem->region)))
                return -EIO;

        /*
         * TODO: For stolen lmem we mostly just care about populating the dsm
         * related bits and setting up the drm_mm allocator for the range.
         * Perhaps split up i915_gem_init_stolen() for this.
         */
        err = i915_gem_init_stolen(mem);
        if (err)
                goto err_fini;

        return 0;

err_fini:
        io_mapping_fini(&mem->iomap);
        return err;
}

static void release_stolen_lmem(struct intel_memory_region *mem)
{
        io_mapping_fini(&mem->iomap);
        i915_gem_cleanup_stolen(mem->i915);
}

static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
        .init = init_stolen_lmem,
        .release = release_stolen_lmem,
        .init_object = _i915_gem_object_stolen_init,
};

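/*
 * Probe stolen local memory: GEN12_DSMBASE tells us where the data stolen
 * memory begins within the LMEM BAR (BAR 2); everything from there to the
 * end of the BAR belongs to stolen.
 */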
struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
                           u16 instance)
{
        struct intel_uncore *uncore = &i915->uncore;
        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
        struct intel_memory_region *mem;
        resource_size_t io_start;
        resource_size_t lmem_size;
        u64 lmem_base;

        lmem_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
        if (GEM_WARN_ON(lmem_base >= pci_resource_len(pdev, 2)))
                return ERR_PTR(-ENODEV);

        lmem_size = pci_resource_len(pdev, 2) - lmem_base;
        io_start = pci_resource_start(pdev, 2) + lmem_base;

        mem = intel_memory_region_create(i915, lmem_base, lmem_size,
                                         I915_GTT_PAGE_SIZE_4K, io_start,
                                         type, instance,
                                         &i915_region_stolen_lmem_ops);
        if (IS_ERR(mem))
                return mem;

        /*
         * TODO: consider creating common helper to just print all the
         * interesting stuff from intel_memory_region, which we can use for all
         * our probed regions.
         */

        drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
                &mem->io_start);

        intel_memory_region_set_name(mem, "stolen-local");

        mem->private = true;

        return mem;
}

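/*
 * Create the stolen-system region from the range discovered by the x86
 * early quirks code (intel_graphics_stolen_res).
 */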
struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
                           u16 instance)
{
        struct intel_memory_region *mem;

        mem = intel_memory_region_create(i915,
                                         intel_graphics_stolen_res.start,
                                         resource_size(&intel_graphics_stolen_res),
                                         PAGE_SIZE, 0, type, instance,
                                         &i915_region_stolen_smem_ops);
        if (IS_ERR(mem))
                return mem;

        intel_memory_region_set_name(mem, "stolen-system");

        mem->private = true;
        return mem;
}

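/*
 * Wrap a pre-existing allocation (e.g. the firmware framebuffer) in a GEM
 * object: rather than letting the allocator pick a slot, reserve the exact
 * node at @stolen_offset.
 */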
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
                                               resource_size_t stolen_offset,
                                               resource_size_t size)
{
        struct intel_memory_region *mem = i915->mm.stolen_region;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        int ret;

        if (!drm_mm_initialized(&i915->mm.stolen))
                return ERR_PTR(-ENODEV);

        drm_dbg(&i915->drm,
                "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
                &stolen_offset, &size);

        /* KISS and expect everything to be page-aligned */
        if (GEM_WARN_ON(size == 0) ||
            GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) ||
            GEM_WARN_ON(!IS_ALIGNED(stolen_offset, mem->min_page_size)))
                return ERR_PTR(-EINVAL);

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return ERR_PTR(-ENOMEM);

        stolen->start = stolen_offset;
        stolen->size = size;
        mutex_lock(&i915->mm.stolen_lock);
        ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
        mutex_unlock(&i915->mm.stolen_lock);
        if (ret)
                goto err_free;

        obj = i915_gem_object_alloc();
        if (!obj) {
                ret = -ENOMEM;
                goto err_stolen;
        }

        ret = __i915_gem_object_create_stolen(mem, obj, stolen);
        if (ret)
                goto err_object_free;

        i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
        return obj;

err_object_free:
        i915_gem_object_free(obj);
err_stolen:
        i915_gem_stolen_remove_node(i915, stolen);
err_free:
        kfree(stolen);
        return ERR_PTR(ret);
}

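/* Stolen objects are identified by their ops table. */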
bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
        return obj->ops == &i915_gem_object_stolen_ops;
}