// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_slpc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */
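
/*
 * For example (a sketch; the authoritative bit definitions are the
 * ENABLE_GUC_* flags in i915_params.h), GuC submission and HuC loading
 * can both be requested at module load time with:
 *
 *	modprobe i915 enable_guc=3
 *
 * where bit 0 requests GuC submission and bit 1 requests HuC loading.
 */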

void intel_guc_notify(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	/*
	 * On Gen11+, the value written to the register is passed as a payload
	 * to the FW. However, the FW currently treats all values the same way
	 * (H2G interrupt), so we can just write the value that the HW expects
	 * on older gens.
	 */
	intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}
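
/*
 * A quick illustration (assuming the gen11+ values set up in
 * intel_guc_init_early() below): with send_regs.base at the offset of
 * GEN11_SOFT_SCRATCH(0), guc_send_reg(guc, 2) resolves to base + 8,
 * i.e. the MMIO register GEN11_SOFT_SCRATCH(2).
 */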

void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
						guc_send_reg(guc, i),
						FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
		     gt->pm_guc_events);
	gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

	spin_lock_irq(&gt->irq_lock);
	WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
	intel_uncore_write(gt->uncore,
			   GEN11_GUC_SG_INTR_ENABLE, events);
	intel_uncore_write(gt->uncore,
			   GEN11_GUC_SG_INTR_MASK, ~events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);

	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}

void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);
	intel_guc_slpc_init_early(&guc->slpc);
	intel_guc_rc_init_early(guc);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	if (GRAPHICS_VER(i915) >= 11) {
		guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
		guc->send_regs.base =
			i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;

	} else {
		guc->notify_reg = GUC_SEND_INTERRUPT;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}
}

void intel_guc_init_late(struct intel_guc *guc)
{
	intel_guc_ads_init_late(guc);
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_submission_is_used(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	if (intel_guc_slpc_is_used(guc))
		flags |= GUC_CTL_ENABLE_SLPC;

	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DEBUG_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DEBUG_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}
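
/*
 * Worked example (hypothetical sizes, just to show the encoding): with
 * CRASH_BUFFER_SIZE = SZ_1M and DEBUG_BUFFER_SIZE = SZ_2M, both are
 * 1MiB-aligned, so UNIT is SZ_1M and FLAG is GUC_LOG_ALLOC_IN_MEGABYTE;
 * the crash field then encodes 1M/1M - 1 = 0 and the debug field encodes
 * 2M/1M - 1 = 1, i.e. each field holds "number of units minus one".
 */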

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block to the SOFT_SCRATCH registers before
 * starting the firmware transfer. These parameters are read by the
 * firmware on startup and cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
}
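
/*
 * A sketch of the scratch register layout implied by the loop above:
 *
 *	SOFT_SCRATCH(0)                  = 0 (cleared; on pre-gen11 this
 *	                                   register also carries the MMIO
 *	                                   send/recv header, see below)
 *	SOFT_SCRATCH(1)                  = params[0]
 *	...
 *	SOFT_SCRATCH(GUC_CTL_MAX_DWORDS) = params[GUC_CTL_MAX_DWORDS - 1]
 */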

int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto out;

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_fw;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_submission_is_used(guc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	if (intel_guc_slpc_is_used(guc)) {
		ret = intel_guc_slpc_init(&guc->slpc);
		if (ret)
			goto err_submission;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

err_submission:
	intel_guc_submission_fini(guc);
err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_fw:
	intel_uc_fw_fini(&guc->fw);
out:
	i915_probe_error(gt->i915, "failed with %d\n", ret);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (!intel_uc_fw_is_loadable(&guc->fw))
		return;

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_guc_slpc_is_used(guc))
		intel_guc_slpc_fini(&guc->slpc);

	if (intel_guc_submission_is_used(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	intel_uc_fw_fini(&guc->fw);
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 header;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST);
	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

retry:
	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   GUC_HXG_MSG_0_ORIGIN,
					   FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
						      GUC_HXG_ORIGIN_GUC),
					   10, 10, &header);
	if (unlikely(ret)) {
timeout:
		drm_err(&i915->drm, "mmio request %#x: no reply %x\n",
			request[0], header);
		goto out;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
#define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \
		FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC || \
		FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_NO_RESPONSE_BUSY; })

		ret = wait_for(done, 1000);
		if (unlikely(ret))
			goto timeout;
		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
				       GUC_HXG_ORIGIN_GUC))
			goto proto;
#undef done
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		drm_dbg(&i915->drm, "mmio request %#x: retrying, reason %u\n",
			request[0], reason);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		drm_err(&i915->drm, "mmio request %#x: failure %x/%u\n",
			request[0], error, hint);
		ret = -ENXIO;
		goto out;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		drm_err(&i915->drm, "mmio request %#x: unexpected reply %#x\n",
			request[0], header);
		ret = -EPROTO;
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count);

		GEM_BUG_ON(!count);

		response_buf[0] = header;

		for (i = 1; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i));

		/* Use number of copied dwords as our return value */
		ret = count;
	} else {
		/* Use data from the GuC response as our return value */
		ret = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
	}

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}
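
/*
 * Example caller (a sketch, not taken from this file): building a minimal
 * one-dword HXG request for a hypothetical "action" code and sending it with
 * no response buffer, so the return value carries DATA0 of the reply header.
 *
 *	u32 request[] = {
 *		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
 *		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
 *		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, action),
 *	};
 *	int ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request),
 *				      NULL, 0);
 */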

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return: non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc: the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	int ret;
	u32 action[] = {
		INTEL_GUC_ACTION_RESET_CLIENT,
	};

	if (!intel_guc_is_ready(guc))
		return 0;

	if (intel_guc_submission_is_used(guc)) {
		/*
		 * This H2G MMIO command tears down the GuC in two steps. First it will
		 * generate a G2H CTB for every active context indicating a reset. In
		 * practice the i915 shouldn't ever get a G2H as suspend should only be
		 * called when the GPU is idle. Next, it tears down the CTBs and this
		 * H2G MMIO command completes.
		 *
		 * Don't abort on a failure code from the GuC. Keep going and do the
		 * clean up in sanitize() and re-initialisation on resume and hopefully
		 * the error here won't be problematic.
		 */
		ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
		if (ret)
			DRM_ERROR("GuC suspend: RESET_CLIENT action failed with error %d!\n", ret);
	}

	/* Signal that the GuC isn't running. */
	intel_guc_sanitize(guc);

	return 0;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc: the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	/*
	 * NB: This function can still be called even if GuC submission is
	 * disabled, e.g. if GuC is enabled for HuC authentication only. Thus,
	 * if any code is later added here, it must support doing nothing
	 * if submission is disabled (as per intel_guc_suspend).
	 */
	return 0;
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to memory location of the FW itself (WOPCM)
 * or other parts of the HW. The driver must take care not to place objects that
 * the GuC is going to access in these reserved ranges. The layout of the GuC
 * address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address  +=====> +====================+ <== GuC ggtt_pin_bias
 *   Space   ^       |                    |
 *     |     |       |                    |
 *     |    GuC      |        GuC         |
 *     |   WOPCM     |       WOPCM        |
 *     |    Size     |                    |
 *     |     |       |                    |
 *     v     v       |                    |
 *     +=====+=====> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
 * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */
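
/*
 * For instance (hypothetical numbers): with a 2MiB WOPCM, ggtt_pin_bias is
 * 0x200000, so a GuC-visible object pinned at GGTT offset 0x300000 is fine,
 * while one at 0x100000 would alias the WOPCM mapping and must be avoided;
 * intel_guc_allocate_vma() below enforces this with PIN_OFFSET_BIAS.
 */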

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its whole lifetime,
 * so we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return: A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	if (HAS_LMEM(gt->i915))
		obj = i915_gem_object_create_lmem(gt->i915, size,
						  I915_BO_ALLOC_CPU_CLEAR |
						  I915_BO_ALLOC_CONTIGUOUS |
						  I915_BO_ALLOC_PM_EARLY);
	else
		obj = i915_gem_object_create_shmem(gt->i915, size);

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_ggtt_pin(vma, NULL, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}

/**
 * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 * @out_vma: return variable for the allocated vma pointer
 * @out_vaddr: return variable for the obj mapping
 *
 * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
 * object with I915_MAP_WB.
 *
 * Return: 0 if successful, a negative errno code otherwise.
 */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
						 i915_coherent_map_type(guc_to_gt(guc)->i915,
									vma->obj, true));
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	*out_vma = vma;
	*out_vaddr = vaddr;

	return 0;
}
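
/*
 * Example usage (a sketch, not a caller from this file): allocating and
 * mapping a page-sized, GuC-visible scratch buffer, then releasing it.
 *
 *	struct i915_vma *vma;
 *	void *vaddr;
 *	int err;
 *
 *	err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &vma, &vaddr);
 *	if (err)
 *		return err;
 *	memset(vaddr, 0, PAGE_SIZE);
 *	...
 *	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
 */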

/**
 * intel_guc_load_status - dump information about GuC load status
 * @guc: the GuC
 * @p: the &drm_printer
 *
 * Pretty printer for GuC load status.
 */
void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_supported(guc)) {
		drm_printf(p, "GuC not supported\n");
		return;
	}

	if (!intel_guc_is_wanted(guc)) {
		drm_printf(p, "GuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&guc->fw, p);

	with_intel_runtime_pm(uncore->rpm, wakeref) {
		u32 status = intel_uncore_read(uncore, GUC_STATUS);
		u32 i;

		drm_printf(p, "\nGuC status 0x%08x:\n", status);
		drm_printf(p, "\tBootrom status = 0x%x\n",
			   (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		drm_printf(p, "\tuKernel status = 0x%x\n",
			   (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		drm_printf(p, "\tMIA Core status = 0x%x\n",
			   (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
		drm_puts(p, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			drm_printf(p, "\t%2d: \t0x%x\n",
				   i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
		}
	}
}

void intel_guc_write_barrier(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
		/*
		 * Ensure intel_uncore_write_fw can be used rather than
		 * intel_uncore_write.
		 */
		GEM_BUG_ON(guc->send_regs.fw_domains);

		/*
		 * This register is used by the i915 and GuC for MMIO based
		 * communication. Once we are in this code CTBs are the only
		 * method the i915 uses to communicate with the GuC so it is
		 * safe to write to this register (a value of 0 is NOP for MMIO
		 * communication). If we ever start mixing CTBs and MMIOs a new
		 * register will have to be chosen. This function is also used
		 * to enforce ordering of a work queue item write and an update
		 * to the process descriptor. When a work queue is being used,
		 * CTBs are also the only mechanism of communication.
		 */
		intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
	} else {
		/* wmb() sufficient for a barrier if in smem */
		wmb();
	}
}