// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016 Intel Corporation
 */

#include <drm/drm_print.h>

#include "gem/i915_gem_context.h"

#include "i915_drv.h"

#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_user.h"
#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_gt_pm.h"
#include "intel_lrc_reg.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "uc/intel_guc_submission.h"

/*
 * Haswell does have the CXT_SIZE register, but it does not appear to be
 * valid. The docs explain, in dwords, what is in the context object. The
 * full size is 70720 bytes; however, the power context and execlist context
 * will never be saved (the power context is stored elsewhere, and execlists
 * don't work on HSW) - so the final size, including the extra state required
 * for the Resource Streamer, is 66944 bytes, which rounds up to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
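
/*
 * Illustrative arithmetic (assuming 4KiB pages):
 * round_up(66944, 4096) == 69632 == 17 * 4096, hence the 17 pages above.
 */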

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

#define MAX_MMIO_BASES 3
struct engine_info {
	u8 class;
	u8 instance;
	/* mmio bases table *must* be sorted in reverse graphics_ver order */
	struct engine_mmio_base {
		u32 graphics_ver : 8;
		u32 base : 24;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info intel_engines[] = {
	[RCS0] = {
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .graphics_ver = 1, .base = RENDER_RING_BASE }
		},
	},
	[BCS0] = {
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .graphics_ver = 6, .base = BLT_RING_BASE }
		},
	},
	[VCS0] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD_RING_BASE },
			{ .graphics_ver = 6, .base = GEN6_BSD_RING_BASE },
			{ .graphics_ver = 4, .base = BSD_RING_BASE }
		},
	},
	[VCS1] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD2_RING_BASE },
			{ .graphics_ver = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[VCS2] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[VCS3] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[VCS4] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 4,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD5_RING_BASE }
		},
	},
	[VCS5] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 5,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD6_RING_BASE }
		},
	},
	[VCS6] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 6,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD7_RING_BASE }
		},
	},
	[VCS7] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 7,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD8_RING_BASE }
		},
	},
	[VECS0] = {
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .graphics_ver = 7, .base = VEBOX_RING_BASE }
		},
	},
	[VECS1] = {
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
	[VECS2] = {
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_VEBOX3_RING_BASE }
		},
	},
	[VECS3] = {
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_VEBOX4_RING_BASE }
		},
	},
};

/**
 * intel_engine_context_size() - return the size of the context for an engine
 * @gt: the gt
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (GRAPHICS_VER(gt->i915)) {
		default:
			MISSING_CASE(GRAPHICS_VER(gt->i915));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 12:
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(gt->i915))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = intel_uncore_read(uncore, CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
			/*
			 * There is a discrepancy here between the size reported
			 * by the register and the size of the context layout
			 * in the docs. Both are described as authoritative!
			 *
			 * The discrepancy is on the order of a few cachelines,
			 * but the total is under one page (4k), which is our
			 * minimum allocation anyway so it should all come
			 * out in the wash.
			 */
			cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
			drm_dbg(&gt->i915->drm,
				"graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
				GRAPHICS_VER(gt->i915), cxt_size * 64,
				cxt_size - 1);
			return round_up(cxt_size * 64, PAGE_SIZE);
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		fallthrough;
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (GRAPHICS_VER(gt->i915) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}

static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (GRAPHICS_VER(i915) >= bases[i].graphics_ver)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}
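
/*
 * Illustrative walk-through (not driver code), assuming a graphics_ver 9
 * platform looking up VCS0 in the intel_engines[] table above:
 *
 *	base = __engine_mmio_base(i915, intel_engines[VCS0].mmio_bases);
 *
 * The { 11, GEN11_BSD_RING_BASE } entry is skipped (9 < 11), the
 * { 6, GEN6_BSD_RING_BASE } entry matches (9 >= 6), so base ends up as
 * GEN6_BSD_RING_BASE. This is why each mmio_bases table must stay sorted
 * in reverse graphics_ver order.
 */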

static void __sprint_engine_name(struct intel_engine_cs *engine)
{
	/*
	 * Before we know what the uABI name for this engine will be,
	 * we still would like to keep track of this engine in the debug logs.
	 * We throw in a ' here as a reminder that this isn't its final name.
	 */
	GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
			     intel_engine_class_repr(engine->class),
			     engine->instance) >= sizeof(engine->name));
}
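
/*
 * For example, RCS0 is initially tracked as "rcs'0" in the debug logs;
 * the tick disappears when the final uABI name (e.g. "rcs0") is assigned
 * at engine registration time.
 */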

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Though they added more rings on g4x/ilk, they did not add
	 * per-engine HWSTAM until gen6.
	 */
	if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
		return;

	if (GRAPHICS_VER(engine->i915) >= 3)
		ENGINE_WRITE(engine, RING_HWSTAM, mask);
	else
		ENGINE_WRITE16(engine, RING_HWSTAM, mask);
}

static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
{
	/* Mask off all writes into the unknown HWSP */
	intel_engine_set_hwsp_writemask(engine, ~0u);
}

static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
{
	GEM_DEBUG_WARN_ON(iir);
}

static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
			      u8 logical_instance)
{
	const struct engine_info *info = &intel_engines[id];
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	u8 guc_class;

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
	BUILD_BUG_ON(I915_MAX_VCS > (MAX_ENGINE_INSTANCE + 1));
	BUILD_BUG_ON(I915_MAX_VECS > (MAX_ENGINE_INSTANCE + 1));

	if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
		return -EINVAL;

	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);

	INIT_LIST_HEAD(&engine->pinned_contexts_list);
	engine->id = id;
	engine->legacy_idx = INVALID_ENGINE;
	engine->mask = BIT(id);
	engine->i915 = i915;
	engine->gt = gt;
	engine->uncore = gt->uncore;
	guc_class = engine_class_to_guc_class(info->class);
	engine->guc_id = MAKE_GUC_ID(guc_class, info->instance);
	engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);

	engine->irq_handler = nop_irq_handler;

	engine->class = info->class;
	engine->instance = info->instance;
	engine->logical_mask = BIT(logical_instance);
	__sprint_engine_name(engine);

	engine->props.heartbeat_interval_ms =
		CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
	engine->props.max_busywait_duration_ns =
		CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
	engine->props.preempt_timeout_ms =
		CONFIG_DRM_I915_PREEMPT_TIMEOUT;
	engine->props.stop_timeout_ms =
		CONFIG_DRM_I915_STOP_TIMEOUT;
	engine->props.timeslice_duration_ms =
		CONFIG_DRM_I915_TIMESLICE_DURATION;

	/* Override to uninterruptible for OpenCL workloads. */
	if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS)
		engine->props.preempt_timeout_ms = 0;

	engine->defaults = engine->props; /* never to change again */

	engine->context_size = intel_engine_context_size(gt, engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;
	if (engine->context_size)
		DRIVER_CAPS(i915)->has_logical_contexts = true;

	ewma__engine_latency_init(&engine->latency);
	seqcount_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	/* Scrub mmio state on takeover */
	intel_engine_sanitize_mmio(engine);

	gt->engine_class[info->class][info->instance] = engine;
	gt->engine[id] = engine;

	return 0;
}

static void __setup_engine_capabilities(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (engine->class == VIDEO_DECODE_CLASS) {
		/*
		 * HEVC support is present on first engine instance
		 * before Gen11 and on all instances afterwards.
		 */
		if (GRAPHICS_VER(i915) >= 11 ||
		    (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
			engine->uabi_capabilities |=
				I915_VIDEO_CLASS_CAPABILITY_HEVC;

		/*
		 * SFC block is present only on even logical engine
		 * instances.
		 */
		if ((GRAPHICS_VER(i915) >= 11 &&
		     (engine->gt->info.vdbox_sfc_access &
		      BIT(engine->instance))) ||
		    (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
			engine->uabi_capabilities |=
				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
	} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
		if (GRAPHICS_VER(i915) >= 9 &&
		    engine->gt->info.sfc_mask & BIT(engine->instance))
			engine->uabi_capabilities |=
				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
	}
}

static void intel_setup_engine_capabilities(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		__setup_engine_capabilities(engine);
}

/**
 * intel_engines_release() - free the resources allocated for Command Streamers
 * @gt: pointer to struct intel_gt
 */
void intel_engines_release(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Before we release the resources held by engine, we must be certain
	 * that the HW is no longer accessing them -- having the GPU scribble
	 * to or read from a page being used for something else causes no end
	 * of fun.
	 *
	 * The GPU should be reset by this point, but assume the worst just
	 * in case we aborted before completely initialising the engines.
	 */
	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	/* Decouple the backend; but keep the layout for late GPU resets */
	for_each_engine(engine, gt, id) {
		if (!engine->release)
			continue;

		intel_wakeref_wait_for_idle(&engine->wakeref);
		GEM_BUG_ON(intel_engine_pm_is_awake(engine));

		engine->release(engine);
		engine->release = NULL;

		memset(&engine->reset, 0, sizeof(engine->reset));
	}
}

void intel_engine_free_request_pool(struct intel_engine_cs *engine)
{
	if (!engine->request_pool)
		return;

	kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
}

void intel_engines_free(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Free the requests! dma-resv keeps fences around for an eternity */
	rcu_barrier();

	for_each_engine(engine, gt, id) {
		intel_engine_free_request_pool(engine);
		kfree(engine);
		gt->engine[id] = NULL;
	}
}

static
bool gen11_vdbox_has_sfc(struct intel_gt *gt,
			 unsigned int physical_vdbox,
			 unsigned int logical_vdbox, u16 vdbox_mask)
{
	struct drm_i915_private *i915 = gt->i915;

	/*
	 * In Gen11, only even numbered logical VDBOXes are hooked
	 * up to an SFC (Scaler & Format Converter) unit.
	 * In Gen12, even numbered physical instances are always connected
	 * to an SFC. Odd numbered physical instances have SFC only if the
	 * previous even instance is fused off.
	 *
	 * Starting with Xe_HP, there's also a dedicated SFC_ENABLE field
	 * in the fuse register that tells us whether a specific SFC is present.
	 */
	if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
		return false;
	else if (GRAPHICS_VER(i915) == 12)
		return (physical_vdbox % 2 == 0) ||
			!(BIT(physical_vdbox - 1) & vdbox_mask);
	else if (GRAPHICS_VER(i915) == 11)
		return logical_vdbox % 2 == 0;

	MISSING_CASE(GRAPHICS_VER(i915));
	return false;
}
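
/*
 * Worked example (illustrative), Gen12 with sfc_mask == ~0 and
 * vdbox_mask == 0b1110 (VCS0 fused off): VCS2 has SFC as an even
 * physical instance, VCS1 has SFC because its even sibling VCS0 is
 * fused off (BIT(0) & vdbox_mask == 0), and VCS3 has none since VCS2
 * is present.
 */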

/*
 * Determine which engines are fused off in our particular hardware.
 * Note that we have a catch-22 situation where we need to be able to access
 * the blitter forcewake domain to read the engine fuses, but at the same time
 * we need to know which engines are available on the system to know which
 * forcewake domains are present. We solve this by initializing the forcewake
 * domains based on the full engine mask in the platform capabilities before
 * calling this function and pruning the domains for fused-off engines
 * afterwards.
 */
static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_gt_info *info = &gt->info;
	struct intel_uncore *uncore = gt->uncore;
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse, fuse1;
	u16 vdbox_mask;
	u16 vebox_mask;

	info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;

	if (GRAPHICS_VER(i915) < 11)
		return info->engine_mask;

	/*
	 * On newer platforms the fusing register is called 'enable' and has
	 * enable semantics, while on older platforms it is called 'disable'
	 * and bits have disable semantics.
	 */
	media_fuse = intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
		media_fuse = ~media_fuse;

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		     GEN11_GT_VEBOX_DISABLE_SHIFT;
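
	/*
	 * Illustrative example: with disable semantics (pre-Xe_HP), a part
	 * that fuses off VCS2 has BIT(2) set in the raw register; after the
	 * inversion above, BIT(2) is clear in vdbox_mask, so the loop below
	 * drops _VCS(2) from info->engine_mask.
	 */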

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		fuse1 = intel_uncore_read(uncore, HSW_PAVP_FUSE1);
		gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
	} else {
		gt->info.sfc_mask = ~0;
	}

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(gt, _VCS(i))) {
			vdbox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vdbox_mask)) {
			info->engine_mask &= ~BIT(_VCS(i));
			drm_dbg(&i915->drm, "vcs%u fused off\n", i);
			continue;
		}

		if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
			gt->info.vdbox_sfc_access |= BIT(i);
		logical_vdbox++;
	}
	drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
		vdbox_mask, VDBOX_MASK(gt));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(gt, _VECS(i))) {
			vebox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vebox_mask)) {
			info->engine_mask &= ~BIT(_VECS(i));
			drm_dbg(&i915->drm, "vecs%u fused off\n", i);
		}
	}
	drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
		vebox_mask, VEBOX_MASK(gt));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));

	return info->engine_mask;
}

static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
				 u8 class, const u8 *map, u8 num_instances)
{
	int i, j;
	u8 current_logical_id = 0;

	for (j = 0; j < num_instances; ++j) {
		for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
			if (!HAS_ENGINE(gt, i) ||
			    intel_engines[i].class != class)
				continue;

			if (intel_engines[i].instance == map[j]) {
				logical_ids[intel_engines[i].instance] =
					current_logical_id++;
				break;
			}
		}
	}
}
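
/*
 * Worked example (illustrative): for VIDEO_DECODE_CLASS with an identity
 * map and VCS1 fused off, the present instances 0, 2 and 3 are matched in
 * map order and receive logical ids 0, 1 and 2 respectively, yielding a
 * dense logical numbering of the physical instances.
 */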

static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
{
	int i;
	u8 map[MAX_ENGINE_INSTANCE + 1];

	for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
		map[i] = i;
	populate_logical_ids(gt, logical_ids, class, map, ARRAY_SIZE(map));
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @gt: pointer to struct intel_gt
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	const unsigned int engine_mask = init_engine_mask(gt);
	unsigned int mask = 0;
	unsigned int i, class;
	u8 logical_ids[MAX_ENGINE_INSTANCE + 1];
	int err;

	drm_WARN_ON(&i915->drm, engine_mask == 0);
	drm_WARN_ON(&i915->drm, engine_mask &
		    GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
		setup_logical_ids(gt, logical_ids, class);

		for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
			u8 instance = intel_engines[i].instance;

			if (intel_engines[i].class != class ||
			    !HAS_ENGINE(gt, i))
				continue;

			err = intel_engine_setup(gt, i,
						 logical_ids[instance]);
			if (err)
				goto cleanup;

			mask |= BIT(i);
		}
	}

	/*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver: warn and disable the forgotten engines.
	 */
	if (drm_WARN_ON(&i915->drm, mask != engine_mask))
		gt->info.engine_mask = mask;

	gt->info.num_engines = hweight32(mask);

	intel_gt_check_and_clear_faults(gt);

	intel_setup_engine_capabilities(gt);

	intel_uncore_prune_engine_fw_domains(gt->uncore, gt);

	return 0;

cleanup:
	intel_engines_free(gt);
	return err;
}

void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->port_mask = 1;
	GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	memset(execlists->pending, 0, sizeof(execlists->pending));
	execlists->active =
		memset(execlists->inflight, 0, sizeof(execlists->inflight));
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;

	/* Prevent writes into HWSP after returning the page to the system */
	intel_engine_set_hwsp_writemask(engine, ~0u);

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	if (!HWS_NEEDS_PHYSICAL(engine->i915))
		i915_vma_unpin(vma);

	i915_gem_object_unpin_map(vma->obj);
	i915_gem_object_put(vma->obj);
}

static int pin_ggtt_status_page(struct intel_engine_cs *engine,
				struct i915_gem_ww_ctx *ww,
				struct i915_vma *vma)
{
	unsigned int flags;

	if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
		/*
		 * On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags = PIN_MAPPABLE;
	else
		flags = PIN_HIGH;

	return i915_ggtt_pin(vma, ww, 0, flags);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_gem_ww_ctx ww;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	INIT_LIST_HEAD(&engine->status_page.timelines);

	/*
	 * Though the HWS register does support 36bit addresses, historically
	 * we have had hangs and corruption reported due to wild writes if
	 * the HWS is placed above 4G. We only allow objects to be allocated
	 * in GFP_DMA32 for i965, and no earlier physical address users had
	 * access to more than 4G.
	 */
	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		drm_err(&engine->i915->drm,
			"Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_put;
	}

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915))
		ret = pin_ggtt_status_page(engine, &ww, vma);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
	engine->status_page.vma = vma;

err_unpin:
	if (ret)
		i915_vma_unpin(vma);
err:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
err_put:
	if (ret)
		i915_gem_object_put(obj);
	return ret;
}

static int engine_setup_common(struct intel_engine_cs *engine)
{
	int err;

	init_llist_head(&engine->barrier_tasks);

	err = init_status_page(engine);
	if (err)
		return err;

	engine->breadcrumbs = intel_breadcrumbs_create(engine);
	if (!engine->breadcrumbs) {
		err = -ENOMEM;
		goto err_status;
	}

	engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL);
	if (!engine->sched_engine) {
		err = -ENOMEM;
		goto err_sched_engine;
	}
	engine->sched_engine->private_data = engine;

	err = intel_engine_init_cmd_parser(engine);
	if (err)
		goto err_cmd_parser;

	intel_engine_init_execlists(engine);
	intel_engine_init__pm(engine);
	intel_engine_init_retire(engine);

	/* Use the whole device by default */
	engine->sseu =
		intel_sseu_from_device_info(&engine->gt->info.sseu);

	intel_engine_init_workarounds(engine);
	intel_engine_init_whitelist(engine);
	intel_engine_init_ctx_wa(engine);

	if (GRAPHICS_VER(engine->i915) >= 12)
		engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;

	return 0;

err_cmd_parser:
	i915_sched_engine_put(engine->sched_engine);
err_sched_engine:
	intel_breadcrumbs_put(engine->breadcrumbs);
err_status:
	cleanup_status_page(engine);
	return err;
}

struct measure_breadcrumb {
	struct i915_request rq;
	struct intel_ring ring;
	u32 cs[2048];
};

static int measure_breadcrumb_dw(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct measure_breadcrumb *frame;
	int dw;

	GEM_BUG_ON(!engine->gt->scratch);

	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	frame->rq.engine = engine;
	frame->rq.context = ce;
	rcu_assign_pointer(frame->rq.timeline, ce->timeline);
	frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno;

	frame->ring.vaddr = frame->cs;
	frame->ring.size = sizeof(frame->cs);
	frame->ring.wrap =
		BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
	frame->ring.effective_size = frame->ring.size;
	intel_ring_update_space(&frame->ring);
	frame->rq.ring = &frame->ring;

	mutex_lock(&ce->timeline->mutex);
	spin_lock_irq(&engine->sched_engine->lock);

	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;

	spin_unlock_irq(&engine->sched_engine->lock);
	mutex_unlock(&ce->timeline->mutex);

	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */

	kfree(frame);
	return dw;
}

struct intel_context *
intel_engine_create_pinned_context(struct intel_engine_cs *engine,
				   struct i915_address_space *vm,
				   unsigned int ring_size,
				   unsigned int hwsp,
				   struct lock_class_key *key,
				   const char *name)
{
	struct intel_context *ce;
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return ce;

	__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
	ce->timeline = page_pack_bits(NULL, hwsp);
	ce->ring = NULL;
	ce->ring_size = ring_size;

	i915_vm_put(ce->vm);
	ce->vm = i915_vm_get(vm);

	err = intel_context_pin(ce); /* perma-pin so it is always available */
	if (err) {
		intel_context_put(ce);
		return ERR_PTR(err);
	}

	list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list);

	/*
	 * Give our perma-pinned kernel timelines a separate lockdep class,
	 * so that we can use them from within the normal user timelines
	 * should we need to inject GPU operations during their request
	 * construction.
	 */
	lockdep_set_class_and_name(&ce->timeline->mutex, key, name);

	return ce;
}

void intel_engine_destroy_pinned_context(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *hwsp = engine->status_page.vma;

	GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);

	mutex_lock(&hwsp->vm->mutex);
	list_del(&ce->timeline->engine_link);
	mutex_unlock(&hwsp->vm->mutex);

	list_del(&ce->pinned_contexts_link);
	intel_context_unpin(ce);
	intel_context_put(ce);
}

static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
	static struct lock_class_key kernel;

	return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
						  I915_GEM_HWS_SEQNO_ADDR,
						  &kernel, "kernel_context");
}

/**
 * engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
static int engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	int ret;

	engine->set_default_submission(engine);

	/*
	 * We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ce = create_kernel_context(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	ret = measure_breadcrumb_dw(ce);
	if (ret < 0)
		goto err_context;

	engine->emit_fini_breadcrumb_dw = ret;
	engine->kernel_context = ce;

	return 0;

err_context:
	intel_engine_destroy_pinned_context(ce);
	return ret;
}

int intel_engines_init(struct intel_gt *gt)
{
	int (*setup)(struct intel_engine_cs *engine);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (intel_uc_uses_guc_submission(&gt->uc)) {
		gt->submission_method = INTEL_SUBMISSION_GUC;
		setup = intel_guc_submission_setup;
	} else if (HAS_EXECLISTS(gt->i915)) {
		gt->submission_method = INTEL_SUBMISSION_ELSP;
		setup = intel_execlists_submission_setup;
	} else {
		gt->submission_method = INTEL_SUBMISSION_RING;
		setup = intel_ring_submission_setup;
	}

	for_each_engine(engine, gt, id) {
		err = engine_setup_common(engine);
		if (err)
			return err;

		err = setup(engine);
		if (err)
			return err;

		err = engine_init_common(engine);
		if (err)
			return err;

		intel_engine_add_user(engine);
	}

	return 0;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));

	i915_sched_engine_put(engine->sched_engine);
	intel_breadcrumbs_put(engine->breadcrumbs);

	intel_engine_fini_retire(engine);
	intel_engine_cleanup_cmd_parser(engine);

	if (engine->default_state)
		fput(engine->default_state);

	if (engine->kernel_context)
		intel_engine_destroy_pinned_context(engine->kernel_context);

	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
	cleanup_status_page(engine);

	intel_wa_list_free(&engine->ctx_wa_list);
	intel_wa_list_free(&engine->wa_list);
	intel_wa_list_free(&engine->whitelist);
}

/**
 * intel_engine_resume - re-initializes the HW state of the engine
 * @engine: Engine to resume.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_resume(struct intel_engine_cs *engine)
{
	intel_engine_apply_workarounds(engine);
	intel_engine_apply_whitelist(engine);

	return engine->resume(engine);
}

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	u64 acthd;

	if (GRAPHICS_VER(i915) >= 8)
		acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
	else if (GRAPHICS_VER(i915) >= 4)
		acthd = ENGINE_READ(engine, RING_ACTHD);
	else
		acthd = ENGINE_READ(engine, ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	u64 bbaddr;

	if (GRAPHICS_VER(engine->i915) >= 8)
		bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
	else
		bbaddr = ENGINE_READ(engine, RING_BBADDR);

	return bbaddr;
}

static unsigned long stop_timeout(const struct intel_engine_cs *engine)
{
	if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
		return 0;

	/*
	 * If we are doing a normal GPU reset, we can take our time and allow
	 * the engine to quiesce. We've stopped submission to the engine, and
	 * if we wait long enough an innocent context should complete and
	 * leave the engine idle. So they should not be caught unaware by
	 * the forthcoming GPU reset (which usually follows the stop_cs)!
	 */
	return READ_ONCE(engine->props.stop_timeout_ms);
}

static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
				  int fast_timeout_us,
				  int slow_timeout_ms)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
	int err;

	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
	err = __intel_wait_for_register_fw(engine->uncore, mode,
					   MODE_IDLE, MODE_IDLE,
					   fast_timeout_us,
					   slow_timeout_ms,
					   NULL);

	/* A final mmio read to let GPU writes be hopefully flushed to memory */
	intel_uncore_posting_read_fw(uncore, mode);
	return err;
}

int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
	int err = 0;

	if (GRAPHICS_VER(engine->i915) < 3)
		return -ENODEV;

	ENGINE_TRACE(engine, "\n");
	if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
		ENGINE_TRACE(engine,
			     "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
			     ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
			     ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);

		/*
		 * Sometimes we observe that the idle flag is not
		 * set even though the ring is empty. So double
		 * check before giving up.
		 */
		if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
		    (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
			err = -ETIMEDOUT;
	}

	return err;
}

void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
	ENGINE_TRACE(engine, "\n");

	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

static u32
read_subslice_reg(const struct intel_engine_cs *engine,
		  int slice, int subslice, i915_reg_t reg)
{
	return intel_uncore_read_with_mcr_steering(engine->uncore, reg,
						   slice, subslice);
}

/* NB: please notice the memset */
void intel_engine_get_instdone(const struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *i915 = engine->i915;
	const struct sseu_dev_info *sseu = &engine->gt->info.sseu;
	struct intel_uncore *uncore = engine->uncore;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;
	int iter;

	memset(instdone, 0, sizeof(*instdone));

	if (GRAPHICS_VER(i915) >= 8) {
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));

		if (engine->id != RCS0)
			return;

		instdone->slice_common =
			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
		if (GRAPHICS_VER(i915) >= 12) {
			instdone->slice_common_extra[0] =
				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
			instdone->slice_common_extra[1] =
				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
		}

		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
			for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) {
				instdone->sampler[slice][subslice] =
					read_subslice_reg(engine, slice, subslice,
							  GEN7_SAMPLER_INSTDONE);
				instdone->row[slice][subslice] =
					read_subslice_reg(engine, slice, subslice,
							  GEN7_ROW_INSTDONE);
			}
		} else {
			for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
				instdone->sampler[slice][subslice] =
					read_subslice_reg(engine, slice, subslice,
							  GEN7_SAMPLER_INSTDONE);
				instdone->row[slice][subslice] =
					read_subslice_reg(engine, slice, subslice,
							  GEN7_ROW_INSTDONE);
			}
		}

		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
			for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice)
				instdone->geom_svg[slice][subslice] =
					read_subslice_reg(engine, slice, subslice,
							  XEHPG_INSTDONE_GEOM_SVG);
		}
	} else if (GRAPHICS_VER(i915) >= 7) {
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));

		if (engine->id != RCS0)
			return;

		instdone->slice_common =
			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
		instdone->sampler[0][0] =
			intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] =
			intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
	} else if (GRAPHICS_VER(i915) >= 4) {
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
		if (engine->id == RCS0)
			/* HACK: Using the wrong struct member */
			instdone->slice_common =
				intel_uncore_read(uncore, GEN4_INSTDONE1);
	} else {
		instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
	}
}

static bool ring_is_idle(struct intel_engine_cs *engine)
{
	bool idle = true;

	if (I915_SELFTEST_ONLY(!engine->mmio_base))
		return true;

	if (!intel_engine_pm_get_if_awake(engine))
		return true;

	/* First check that no commands are left in the ring */
	if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
	    (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (GRAPHICS_VER(engine->i915) > 2 &&
	    !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
		idle = false;

	intel_engine_pm_put(engine);

	return idle;
}

void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
{
	struct tasklet_struct *t = &engine->sched_engine->tasklet;

	if (!t->callback)
		return;

	local_bh_disable();
	if (tasklet_trylock(t)) {
		/* Must wait for any GPU reset in progress. */
		if (__tasklet_is_enabled(t))
			t->callback(t);
		tasklet_unlock(t);
	}
	local_bh_enable();

	/* Synchronise and wait for the tasklet on another CPU */
	if (sync)
		tasklet_unlock_wait(t);
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Returns true if there are no requests pending, nothing left to be submitted
 * to hardware, and the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	/* More white lies, if wedged, hw state is inconsistent */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	if (!intel_engine_pm_is_awake(engine))
		return true;

	/* Waiting to drain ELSP? */
	intel_synchronize_hardirq(engine->i915);
	intel_engine_flush_submission(engine);

	/* ELSP is empty, but there are ready requests? E.g. after reset */
	if (!i915_sched_engine_is_empty(engine->sched_engine))
		return false;

	/* Ring stopped? */
	return ring_is_idle(engine);
}

bool intel_engines_are_idle(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (intel_gt_is_wedged(gt))
		return true;

	/* Already parked (and passed an idleness test); must still be idle */
	if (!READ_ONCE(gt->awake))
		return true;

	for_each_engine(engine, gt, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

bool intel_engine_irq_enable(struct intel_engine_cs *engine)
{
	if (!engine->irq_enable)
		return false;

	/* Caller disables interrupts */
	spin_lock(&engine->gt->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->gt->irq_lock);

	return true;
}

void intel_engine_irq_disable(struct intel_engine_cs *engine)
{
	if (!engine->irq_disable)
		return;

	/* Caller disables interrupts */
	spin_lock(&engine->gt->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->gt->irq_lock);
}

void intel_engines_reset_default_submission(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (engine->sanitize)
			engine->sanitize(engine);

		engine->set_default_submission(engine);
	}
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (GRAPHICS_VER(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 4:
		return !IS_I965G(engine->i915); /* who knows! */
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

static struct intel_timeline *get_timeline(struct i915_request *rq)
{
	struct intel_timeline *tl;

	/*
	 * Even though we are holding the engine->sched_engine->lock here, there
	 * is no control over the submission queue per-se and we are
	 * inspecting the active state at a random point in time, with an
	 * unknown queue. Play safe and make sure the timeline remains valid.
	 * (Only being used for pretty printing, one extra kref shouldn't
	 * cause a camel stampede!)
	 */
	rcu_read_lock();
	tl = rcu_dereference(rq->timeline);
	if (!kref_get_unless_zero(&tl->kref))
		tl = NULL;
	rcu_read_unlock();

	return tl;
}

static int print_ring(char *buf, int sz, struct i915_request *rq)
{
	int len = 0;

	if (!i915_request_signaled(rq)) {
		struct intel_timeline *tl = get_timeline(rq);

		len = scnprintf(buf, sz,
				"ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
				i915_ggtt_offset(rq->ring->vma),
				tl ? tl->hwsp_offset : 0,
				hwsp_seqno(rq),
				DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
						      1000 * 1000));

		if (tl)
			intel_timeline_put(tl);
	}

	return len;
}

static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "[%04zx] %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}

static const char *repr_timer(const struct timer_list *t)
{
	if (!READ_ONCE(t->expires))
		return "inactive";

	if (timer_pending(t))
		return "active";

	return "expired";
}

static void intel_engine_print_registers(struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	u64 addr;

	if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7))
		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
	if (HAS_EXECLISTS(dev_priv)) {
		drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
		drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
	}
	drm_printf(m, "\tRING_START: 0x%08x\n",
		   ENGINE_READ(engine, RING_START));
	drm_printf(m, "\tRING_HEAD: 0x%08x\n",
		   ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL: 0x%08x\n",
		   ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
		   ENGINE_READ(engine, RING_CTL),
		   ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (GRAPHICS_VER(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
			   ENGINE_READ(engine, RING_MI_MODE),
			   ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (GRAPHICS_VER(dev_priv) >= 6) {
		drm_printf(m, "\tRING_IMR: 0x%08x\n",
			   ENGINE_READ(engine, RING_IMR));
		drm_printf(m, "\tRING_ESR: 0x%08x\n",
			   ENGINE_READ(engine, RING_ESR));
		drm_printf(m, "\tRING_EMR: 0x%08x\n",
			   ENGINE_READ(engine, RING_EMR));
		drm_printf(m, "\tRING_EIR: 0x%08x\n",
			   ENGINE_READ(engine, RING_EIR));
	}

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (GRAPHICS_VER(dev_priv) >= 8)
		addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
	else if (GRAPHICS_VER(dev_priv) >= 4)
		addr = ENGINE_READ(engine, RING_DMA_FADD);
	else
		addr = ENGINE_READ(engine, DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (GRAPHICS_VER(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   ENGINE_READ(engine, RING_IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   ENGINE_READ(engine, RING_IPEHR));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
	}

	if (intel_engine_uses_guc(engine)) {
		/* nothing to print yet */
	} else if (HAS_EXECLISTS(dev_priv)) {
		struct i915_request * const *port, *rq;
		const u32 *hws =
			&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
		const u8 num_entries = execlists->csb_size;
		unsigned int idx;
		u8 read, write;

		drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->sched_engine->tasklet.state)),
			   enableddisabled(!atomic_read(&engine->sched_engine->tasklet.count)),
			   repr_timer(&engine->execlists.preempt),
			   repr_timer(&engine->execlists.timer));

		read = execlists->csb_head;
		write = READ_ONCE(*execlists->csb_write);

		drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
			   read, write, num_entries);

		if (read >= num_entries)
			read = 0;
		if (write >= num_entries)
			write = 0;
		if (read > write)
			write += num_entries;
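		/*
		 * Illustrative wrap-around example: with num_entries == 6,
		 * read == 4 and write == 1, write is advanced to 7 and the
		 * loop below prints the CSB entries at idx 5, 0 and 1.
		 */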
1629 while (read < write) {
1630 idx = ++read % num_entries;
1631 drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
1632 idx, hws[idx * 2], hws[idx * 2 + 1]);
1633 }
1634
1635 i915_sched_engine_active_lock_bh(engine->sched_engine);
1636 rcu_read_lock();
1637 for (port = execlists->active; (rq = *port); port++) {
1638 char hdr[160];
1639 int len;
1640
1641 len = scnprintf(hdr, sizeof(hdr),
1642 "\t\tActive[%d]: ccid:%08x%s%s, ",
1643 (int)(port - execlists->active),
1644 rq->context->lrc.ccid,
1645 intel_context_is_closed(rq->context) ? "!" : "",
1646 intel_context_is_banned(rq->context) ? "*" : "");
1647 len += print_ring(hdr + len, sizeof(hdr) - len, rq);
1648 scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
1649 i915_request_show(m, rq, hdr, 0);
1650 }
1651 for (port = execlists->pending; (rq = *port); port++) {
1652 char hdr[160];
1653 int len;
1654
1655 len = scnprintf(hdr, sizeof(hdr),
1656 "\t\tPending[%d]: ccid:%08x%s%s, ",
1657 (int)(port - execlists->pending),
1658 rq->context->lrc.ccid,
1659 intel_context_is_closed(rq->context) ? "!" : "",
1660 intel_context_is_banned(rq->context) ? "*" : "");
1661 len += print_ring(hdr + len, sizeof(hdr) - len, rq);
1662 scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
1663 i915_request_show(m, rq, hdr, 0);
1664 }
1665 rcu_read_unlock();
1666 i915_sched_engine_active_unlock_bh(engine->sched_engine);
1667 } else if (GRAPHICS_VER(dev_priv) > 6) {
1668 drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
1669 ENGINE_READ(engine, RING_PP_DIR_BASE));
1670 drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
1671 ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
1672 drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
1673 ENGINE_READ(engine, RING_PP_DIR_DCLV));
1674 }
1675 }
1676
print_request_ring(struct drm_printer * m,struct i915_request * rq)1677 static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
1678 {
1679 void *ring;
1680 int size;
1681
1682 drm_printf(m,
1683 "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
1684 rq->head, rq->postfix, rq->tail,
1685 rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
1686 rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
1687
1688 size = rq->tail - rq->head;
1689 if (rq->tail < rq->head)
1690 size += rq->ring->size;
1691
1692 ring = kmalloc(size, GFP_ATOMIC);
1693 if (ring) {
1694 const void *vaddr = rq->ring->vaddr;
1695 unsigned int head = rq->head;
1696 unsigned int len = 0;
1697
1698 if (rq->tail < head) {
1699 len = rq->ring->size - head;
1700 memcpy(ring, vaddr + head, len);
1701 head = 0;
1702 }
1703 memcpy(ring + len, vaddr + head, size - len);
1704
1705 hexdump(m, ring, size);
1706 kfree(ring);
1707 }
1708 }
1709
list_count(struct list_head * list)1710 static unsigned long list_count(struct list_head *list)
1711 {
1712 struct list_head *pos;
1713 unsigned long count = 0;
1714
1715 list_for_each(pos, list)
1716 count++;
1717
1718 return count;
1719 }
1720
read_ul(void * p,size_t x)1721 static unsigned long read_ul(void *p, size_t x)
1722 {
1723 return *(unsigned long *)(p + x);
1724 }
1725
print_properties(struct intel_engine_cs * engine,struct drm_printer * m)1726 static void print_properties(struct intel_engine_cs *engine,
1727 struct drm_printer *m)
1728 {
1729 static const struct pmap {
1730 size_t offset;
1731 const char *name;
1732 } props[] = {
1733 #define P(x) { \
1734 .offset = offsetof(typeof(engine->props), x), \
1735 .name = #x \
1736 }
1737 P(heartbeat_interval_ms),
1738 P(max_busywait_duration_ns),
1739 P(preempt_timeout_ms),
1740 P(stop_timeout_ms),
1741 P(timeslice_duration_ms),
1742
1743 {},
1744 #undef P
1745 };
1746 const struct pmap *p;
1747
1748 drm_printf(m, "\tProperties:\n");
1749 for (p = props; p->name; p++)
1750 drm_printf(m, "\t\t%s: %lu [default %lu]\n",
1751 p->name,
1752 read_ul(&engine->props, p->offset),
1753 read_ul(&engine->defaults, p->offset));
1754 }
1755
engine_dump_request(struct i915_request * rq,struct drm_printer * m,const char * msg)1756 static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg)
1757 {
1758 struct intel_timeline *tl = get_timeline(rq);
1759
1760 i915_request_show(m, rq, msg, 0);
1761
1762 drm_printf(m, "\t\tring->start: 0x%08x\n",
1763 i915_ggtt_offset(rq->ring->vma));
1764 drm_printf(m, "\t\tring->head: 0x%08x\n",
1765 rq->ring->head);
1766 drm_printf(m, "\t\tring->tail: 0x%08x\n",
1767 rq->ring->tail);
1768 drm_printf(m, "\t\tring->emit: 0x%08x\n",
1769 rq->ring->emit);
1770 drm_printf(m, "\t\tring->space: 0x%08x\n",
1771 rq->ring->space);
1772
1773 if (tl) {
1774 drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
1775 tl->hwsp_offset);
1776 intel_timeline_put(tl);
1777 }
1778
1779 print_request_ring(m, rq);
1780
1781 if (rq->context->lrc_reg_state) {
1782 drm_printf(m, "Logical Ring Context:\n");
1783 hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
1784 }
1785 }
1786
void intel_engine_dump_active_requests(struct list_head *requests,
				       struct i915_request *hung_rq,
				       struct drm_printer *m)
{
	struct i915_request *rq;
	const char *msg;
	enum i915_request_state state;

	list_for_each_entry(rq, requests, sched.link) {
		if (rq == hung_rq)
			continue;

		state = i915_test_request_state(rq);
		if (state < I915_REQUEST_QUEUED)
			continue;

		if (state == I915_REQUEST_ACTIVE)
			msg = "\t\tactive on engine";
		else
			msg = "\t\tactive in queue";

		engine_dump_request(rq, m, msg);
	}
}

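/*
 * Report the hung request, if any, followed by the rest of the active
 * requests. With GuC submission the hung context is reported by the GuC
 * itself; otherwise we search the execlists for it.
 */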
static void engine_dump_active_requests(struct intel_engine_cs *engine, struct drm_printer *m)
{
	struct i915_request *hung_rq = NULL;
	struct intel_context *ce;
	bool guc;

	/*
	 * No need for an engine->irq_seqno_barrier() before the seqno reads.
	 * The GPU is still running so requests are still executing and any
	 * hardware reads will be out of date by the time they are reported.
	 * But the intention here is just to report an instantaneous snapshot
	 * so that's fine.
	 */
	lockdep_assert_held(&engine->sched_engine->lock);

	drm_printf(m, "\tRequests:\n");

	guc = intel_uc_uses_guc_submission(&engine->gt->uc);
	if (guc) {
		ce = intel_engine_get_hung_context(engine);
		if (ce)
			hung_rq = intel_context_find_active_request(ce);
	} else {
		hung_rq = intel_engine_execlist_find_hung_request(engine);
	}

	if (hung_rq)
		engine_dump_request(hung_rq, m, "\t\thung");

	if (guc)
		intel_guc_dump_active_requests(engine, hung_rq, m);
	else
		intel_engine_dump_active_requests(&engine->sched_engine->requests,
						  hung_rq, m);
}

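/*
 * Top-level engine state dump: print the optional free-form @header,
 * then the software state (wakeref count, heartbeat, reset counts,
 * properties and in-flight requests) and, if a runtime PM wakeref can
 * be acquired, the engine registers themselves.
 */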
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	unsigned long flags;
	ktime_t dummy;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (intel_gt_is_wedged(engine->gt))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
	drm_printf(m, "\tBarriers?: %s\n",
		   yesno(!llist_empty(&engine->barrier_tasks)));
	drm_printf(m, "\tLatency: %luus\n",
		   ewma__engine_latency_read(&engine->latency));
	if (intel_engine_supports_stats(engine))
		drm_printf(m, "\tRuntime: %llums\n",
			   ktime_to_ms(intel_engine_get_busy_time(engine,
								  &dummy)));
	drm_printf(m, "\tForcewake: %x domains, %d active\n",
		   engine->fw_domain, READ_ONCE(engine->fw_active));

	rcu_read_lock();
	rq = READ_ONCE(engine->heartbeat.systole);
	if (rq)
		drm_printf(m, "\tHeartbeat: %d ms ago\n",
			   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
	rcu_read_unlock();
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));
	print_properties(engine, m);

	spin_lock_irqsave(&engine->sched_engine->lock, flags);
	engine_dump_active_requests(engine, m);

	drm_printf(m, "\tOn hold?: %lu\n",
		   list_count(&engine->sched_engine->hold));
	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);

	drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
	wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
	if (wakeref) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->uncore->rpm, wakeref);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	intel_execlists_show_requests(engine, m, i915_request_show, 8);

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));

	intel_engine_print_breadcrumbs(engine, m);
}

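/*
 * Sample the accumulated busy time. The stats are updated under
 * engine->stats.lock, so readers must retry via the seqcount loop in
 * intel_engine_get_busy_time() below to obtain a consistent snapshot.
 */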
static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
					    ktime_t *now)
{
	ktime_t total = engine->stats.total;

	/*
	 * If the engine is executing something at the moment
	 * add it to the total.
	 */
	*now = ktime_get();
	if (READ_ONCE(engine->stats.active))
		total = ktime_add(total, ktime_sub(*now, engine->stats.start));

	return total;
}

/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 * @now: monotonic timestamp of sampling
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqcount_begin(&engine->stats.lock);
		total = __intel_engine_get_busy_time(engine, now);
	} while (read_seqcount_retry(&engine->stats.lock, seq));

	return total;
}

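/*
 * Create a virtual engine backed by @siblings. A single sibling without
 * FORCE_VIRTUAL degenerates into an ordinary context on that engine;
 * otherwise the submission backend's create_virtual() hook builds the
 * virtual engine.
 */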
struct intel_context *
intel_engine_create_virtual(struct intel_engine_cs **siblings,
			    unsigned int count, unsigned long flags)
{
	if (count == 0)
		return ERR_PTR(-EINVAL);

	if (count == 1 && !(flags & FORCE_VIRTUAL))
		return intel_context_create(siblings[0]);

	GEM_BUG_ON(!siblings[0]->cops->create_virtual);
	return siblings[0]->cops->create_virtual(siblings, count, flags);
}

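/*
 * Find the request most likely responsible for a hang: first walk the
 * timeline of the currently executing context back to its oldest
 * incomplete request, then fall back to the first ACTIVE request on the
 * scheduler's list.
 */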
struct i915_request *
intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
{
	struct i915_request *request, *active = NULL;

	/*
	 * This search does not work in GuC submission mode. However, the GuC
	 * will report the hanging context directly to the driver itself. So
	 * the driver should never get here when in GuC mode.
	 */
	GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc));

	/*
	 * We are called by the error capture, reset and to dump engine
	 * state at random points in time. In particular, note that none of
	 * these callers is crucially ordered with an interrupt. After a
	 * hang, the GPU is dead and we assume that no more writes can happen
	 * (we waited long enough for all writes that were in flight to be
	 * flushed) - adding an extra delay for a recent interrupt is
	 * pointless. Hence, we do not need an engine->irq_seqno_barrier()
	 * before the seqno reads. At all other times, we must assume the GPU
	 * is still running, but we only care about the snapshot of this
	 * moment.
	 */
	lockdep_assert_held(&engine->sched_engine->lock);

	rcu_read_lock();
	request = execlists_active(&engine->execlists);
	if (request) {
		struct intel_timeline *tl = request->context->timeline;

		list_for_each_entry_from_reverse(request, &tl->requests, link) {
			if (__i915_request_is_complete(request))
				break;

			active = request;
		}
	}
	rcu_read_unlock();
	if (active)
		return active;

	list_for_each_entry(request, &engine->sched_engine->requests,
			    sched.link) {
		if (i915_test_request_state(request) != I915_REQUEST_ACTIVE)
			continue;

		active = request;
		break;
	}

	return active;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "mock_engine.c"
#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif