// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/firmware.h>
#include <drm/drm_print.h>

#include "gem/i915_gem_lmem.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"

static inline struct intel_gt *
____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
{
	if (type == INTEL_UC_FW_TYPE_GUC)
		return container_of(uc_fw, struct intel_gt, uc.guc.fw);

	GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
	return container_of(uc_fw, struct intel_gt, uc.huc.fw);
}

static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
	return ____uc_fw_to_gt(uc_fw, uc_fw->type);
}

#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
			       enum intel_uc_fw_status status)
{
	uc_fw->__status = status;
	drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
		"%s firmware -> %s\n",
		intel_uc_fw_type_repr(uc_fw->type),
		status == INTEL_UC_FIRMWARE_SELECTED ?
		uc_fw->path : intel_uc_fw_status_repr(status));
}
#endif

/*
 * List of required GuC and HuC binaries per-platform.
 * Must be ordered based on platform + revid, from newer to older.
 *
 * Note that RKL and ADL-S have the same GuC/HuC device IDs and use the same
 * firmware as TGL.
 */
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 62, 0, 3), huc_def(tgl,  7, 9, 3)) \
	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  62, 0, 0), huc_def(tgl,  7, 9, 3)) \
	fw_def(DG1,          0, guc_def(dg1,  62, 0, 0), huc_def(dg1,  7, 9, 3)) \
	fw_def(ROCKETLAKE,   0, guc_def(tgl,  62, 0, 0), huc_def(tgl,  7, 9, 3)) \
	fw_def(TIGERLAKE,    0, guc_def(tgl,  62, 0, 0), huc_def(tgl,  7, 9, 3)) \
	fw_def(JASPERLAKE,   0, guc_def(ehl,  62, 0, 0), huc_def(ehl,  9, 0, 0)) \
	fw_def(ELKHARTLAKE,  0, guc_def(ehl,  62, 0, 0), huc_def(ehl,  9, 0, 0)) \
	fw_def(ICELAKE,      0, guc_def(icl,  62, 0, 0), huc_def(icl,  9, 0, 0)) \
	fw_def(COMETLAKE,    5, guc_def(cml,  62, 0, 0), huc_def(cml,  4, 0, 0)) \
	fw_def(COMETLAKE,    0, guc_def(kbl,  62, 0, 0), huc_def(kbl,  4, 0, 0)) \
	fw_def(COFFEELAKE,   0, guc_def(kbl,  62, 0, 0), huc_def(kbl,  4, 0, 0)) \
	fw_def(GEMINILAKE,   0, guc_def(glk,  62, 0, 0), huc_def(glk,  4, 0, 0)) \
	fw_def(KABYLAKE,     0, guc_def(kbl,  62, 0, 0), huc_def(kbl,  4, 0, 0)) \
	fw_def(BROXTON,      0, guc_def(bxt,  62, 0, 0), huc_def(bxt,  2, 0, 0)) \
	fw_def(SKYLAKE,      0, guc_def(skl,  62, 0, 0), huc_def(skl,  2, 0, 0))

#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
	"i915/" \
	__stringify(prefix_) name_ \
	__stringify(major_) "." \
	__stringify(minor_) "." \
	__stringify(patch_) ".bin"

#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)

#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
	__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)

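/*
 * For example, with the definitions above, MAKE_GUC_FW_PATH(tgl, 62, 0, 0)
 * expands to "i915/tgl_guc_62.0.0.bin" and MAKE_HUC_FW_PATH(tgl, 7, 9, 3)
 * expands to "i915/tgl_huc_7.9.3.bin".
 */
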
/* All blobs need to be declared via MODULE_FIRMWARE() */
#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \
	MODULE_FIRMWARE(guc_); \
	MODULE_FIRMWARE(huc_);

INTEL_UC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH, MAKE_HUC_FW_PATH)

/* The below structs and macros are used to iterate across the list of blobs */
struct __packed uc_fw_blob {
	u8 major;
	u8 minor;
	const char *path;
};

#define UC_FW_BLOB(major_, minor_, path_) \
	{ .major = major_, .minor = minor_, .path = path_ }

#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))

#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))

struct __packed uc_fw_platform_requirement {
	enum intel_platform p;
	u8 rev; /* first platform rev using this FW */
	const struct uc_fw_blob blobs[INTEL_UC_FW_NUM_TYPES];
};

#define MAKE_FW_LIST(platform_, revid_, guc_, huc_) \
{ \
	.p = INTEL_##platform_, \
	.rev = revid_, \
	.blobs[INTEL_UC_FW_TYPE_GUC] = guc_, \
	.blobs[INTEL_UC_FW_TYPE_HUC] = huc_, \
},
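
/*
 * Illustrative (not literal) expansion: each fw_def() line above becomes one
 * uc_fw_platform_requirement entry when run through MAKE_FW_LIST with
 * GUC_FW_BLOB/HUC_FW_BLOB, e.g. for TIGERLAKE:
 *
 *	{ .p = INTEL_TIGERLAKE, .rev = 0,
 *	  .blobs[INTEL_UC_FW_TYPE_GUC] = { 62, 0, "i915/tgl_guc_62.0.0.bin" },
 *	  .blobs[INTEL_UC_FW_TYPE_HUC] = { 7, 9, "i915/tgl_huc_7.9.3.bin" } },
 */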

static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	static const struct uc_fw_platform_requirement fw_blobs[] = {
		INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB)
	};
	enum intel_platform p = INTEL_INFO(i915)->platform;
	u8 rev = INTEL_REVID(i915);
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) {
		if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
			const struct uc_fw_blob *blob =
					&fw_blobs[i].blobs[uc_fw->type];
			uc_fw->path = blob->path;
			uc_fw->major_ver_wanted = blob->major;
			uc_fw->minor_ver_wanted = blob->minor;
			break;
		}
	}

	/* make sure the list is ordered as expected */
	if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
		for (i = 1; i < ARRAY_SIZE(fw_blobs); i++) {
			if (fw_blobs[i].p < fw_blobs[i - 1].p)
				continue;

			if (fw_blobs[i].p == fw_blobs[i - 1].p &&
			    fw_blobs[i].rev < fw_blobs[i - 1].rev)
				continue;

			pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
			       intel_platform_name(fw_blobs[i - 1].p),
			       fw_blobs[i - 1].rev,
			       intel_platform_name(fw_blobs[i].p),
			       fw_blobs[i].rev);

			uc_fw->path = NULL;
		}
	}
}

static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
{
	if (i915->params.enable_guc & ENABLE_GUC_MASK)
		return i915->params.guc_firmware_path;
	return "";
}

static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
{
	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
		return i915->params.huc_firmware_path;
	return "";
}

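/*
 * Note that the helpers above return an empty string (not NULL) when the
 * relevant enable_guc bits are clear; the empty string still counts as an
 * override below and later maps to INTEL_UC_FIRMWARE_DISABLED in
 * intel_uc_fw_init_early(), while a NULL modparam path leaves the
 * auto-selected blob in place.
 */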
static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	const char *path = NULL;

	switch (uc_fw->type) {
	case INTEL_UC_FW_TYPE_GUC:
		path = __override_guc_firmware_path(i915);
		break;
	case INTEL_UC_FW_TYPE_HUC:
		path = __override_huc_firmware_path(i915);
		break;
	}

	if (unlikely(path)) {
		uc_fw->path = path;
		uc_fw->user_overridden = true;
	}
}

/**
 * intel_uc_fw_init_early - initialize the uC object and select the firmware
 * @uc_fw: uC firmware
 * @type: type of uC
 *
 * Initialize the state of our uC object and relevant tracking and select the
 * firmware to fetch and load.
 */
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
			    enum intel_uc_fw_type type)
{
	struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;

	/*
	 * We use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
	 * before we've looked at the HW caps to see if we have uC support.
	 */
	BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
	GEM_BUG_ON(uc_fw->status);
	GEM_BUG_ON(uc_fw->path);

	uc_fw->type = type;

	if (HAS_GT_UC(i915)) {
		__uc_fw_auto_select(i915, uc_fw);
		__uc_fw_user_override(i915, uc_fw);
	}

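	/*
	 * Map the selection onto the initial status: no path means no blob is
	 * defined for this platform (NOT_SUPPORTED), an empty path means the
	 * uC was disabled via override (DISABLED), anything else is SELECTED.
	 */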
	intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
				  INTEL_UC_FIRMWARE_SELECTED :
				  INTEL_UC_FIRMWARE_DISABLED :
				  INTEL_UC_FIRMWARE_NOT_SUPPORTED);
}

static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	bool user = e == -EINVAL;

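	/*
	 * Each i915_inject_probe_error() call below is a distinct fault
	 * injection point; assuming the usual single-shot behaviour of the
	 * injection counter, at most one branch fires per probe attempt.
	 */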
	if (i915_inject_probe_error(i915, e)) {
		/* non-existing blob */
		uc_fw->path = "<invalid>";
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next major version */
		uc_fw->major_ver_wanted += 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next minor version */
		uc_fw->minor_ver_wanted += 1;
		uc_fw->user_overridden = user;
	} else if (uc_fw->major_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev major version */
		uc_fw->major_ver_wanted -= 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (uc_fw->minor_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev minor version - hey, this should work! */
		uc_fw->minor_ver_wanted -= 1;
		uc_fw->user_overridden = user;
	} else if (user && i915_inject_probe_error(i915, e)) {
		/* officially unsupported platform */
		uc_fw->major_ver_wanted = 0;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = true;
	}
}

/**
 * intel_uc_fw_fetch - fetch uC firmware
 * @uc_fw: uC firmware
 *
 * Fetch uC firmware into GEM obj.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	struct device *dev = i915->drm.dev;
	struct drm_i915_gem_object *obj;
	const struct firmware *fw = NULL;
	struct uc_css_header *css;
	size_t size;
	int err;

	GEM_BUG_ON(!i915->wopcm.size);
	GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));

	err = i915_inject_probe_error(i915, -ENXIO);
	if (err)
		goto fail;

	__force_fw_fetch_failures(uc_fw, -EINVAL);
	__force_fw_fetch_failures(uc_fw, -ESTALE);

	err = request_firmware(&fw, uc_fw->path, dev);
	if (err)
		goto fail;

	/* Check the size of the blob before examining buffer contents */
	if (unlikely(fw->size < sizeof(struct uc_css_header))) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, sizeof(struct uc_css_header));
		err = -ENODATA;
		goto fail;
	}

	css = (struct uc_css_header *)fw->data;

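	/*
	 * Expected blob layout, as implied by the size checks below:
	 * CSS header, followed by the uCode image, followed by the RSA
	 * signature.
	 */
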
	/* Check integrity of size values inside CSS header */
	size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
		css->exponent_size_dw) * sizeof(u32);
	if (unlikely(size != sizeof(struct uc_css_header))) {
		drm_warn(&i915->drm,
			 "%s firmware %s: unexpected header size: %zu != %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, sizeof(struct uc_css_header));
		err = -EPROTO;
		goto fail;
	}

	/* uCode size must be calculated from the other size fields */
	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* now RSA */
	if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) {
		drm_warn(&i915->drm, "%s firmware %s: unexpected key size: %u != %u\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 css->key_size_dw, UOS_RSA_SCRATCH_COUNT);
		err = -EPROTO;
		goto fail;
	}
	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);

	/* At least, it should have header, uCode and RSA. Size of all three. */
	size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
	if (unlikely(fw->size < size)) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, size);
		err = -ENOEXEC;
		goto fail;
	}

	/* Sanity check: the firmware must not be larger than the whole of WOPCM */
	size = __intel_uc_fw_get_upload_size(uc_fw);
	if (unlikely(size >= i915->wopcm.size)) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 size, (size_t)i915->wopcm.size);
		err = -E2BIG;
		goto fail;
	}

	/* Get version numbers from the CSS header */
	uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
					   css->sw_version);
	uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
					   css->sw_version);

	if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
	    uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
		drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			   uc_fw->major_ver_found, uc_fw->minor_ver_found,
			   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
		if (!intel_uc_fw_is_overridden(uc_fw)) {
			err = -ENOEXEC;
			goto fail;
		}
	}

	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
		uc_fw->private_data_size = css->private_data_size;

	if (HAS_LMEM(i915)) {
		obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
		if (!IS_ERR(obj))
			obj->flags |= I915_BO_ALLOC_PM_EARLY;
	} else {
		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
	}

	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto fail;
	}

	uc_fw->obj = obj;
	uc_fw->size = fw->size;
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);

	release_firmware(fw);
	return 0;

fail:
	intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
				  INTEL_UC_FIRMWARE_MISSING :
				  INTEL_UC_FIRMWARE_ERROR);

	drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
	drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);

	release_firmware(fw);		/* OK even if fw is NULL */
	return err;
}

static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
{
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct drm_mm_node *node = &ggtt->uc_fw;

	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(upper_32_bits(node->start));
	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));

	return lower_32_bits(node->start);
}

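/*
 * Bind the firmware object into the reserved ggtt->uc_fw node. The dummy vma
 * embedded in the uc_fw struct is filled in by hand and handed straight to
 * insert_entries(), which avoids going through the regular vma
 * allocation/pinning path just for the upload.
 */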
static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct i915_vma *dummy = &uc_fw->dummy;
	u32 pte_flags = 0;

	dummy->node.start = uc_fw_ggtt_offset(uc_fw);
	dummy->node.size = obj->base.size;
	dummy->pages = obj->mm.pages;
	dummy->vm = &ggtt->vm;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(dummy->node.size > ggtt->uc_fw.size);

	/* uc_fw->obj cache domains were not controlled across suspend */
	if (i915_gem_object_has_struct_page(obj))
		drm_clflush_sg(dummy->pages);

	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
}

static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	u64 start = uc_fw_ggtt_offset(uc_fw);

	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
}

static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct intel_uncore *uncore = gt->uncore;
	u64 offset;
	int ret;

	ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
	if (ret)
		return ret;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = uc_fw_ggtt_offset(uc_fw);
	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));

	/* Set the DMA destination */
	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/*
	 * Set the transfer size. The header plus uCode will be copied to WOPCM
	 * via DMA, excluding any other components
	 */
	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
			      sizeof(struct uc_css_header) + uc_fw->ucode_size);

	/* Start the DMA */
	intel_uncore_write_fw(uncore, DMA_CTRL,
			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));

	/* Wait for DMA to finish */
	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
	if (ret)
		drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
			intel_uc_fw_type_repr(uc_fw->type),
			intel_uncore_read_fw(uncore, DMA_CTRL));

	/* Disable the bits once DMA is over */
	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return ret;
}

/**
 * intel_uc_fw_upload - load uC firmware using custom loader
 * @uc_fw: uC firmware
 * @dst_offset: destination offset
 * @dma_flags: flags for dma ctrl
 *
 * Loads uC firmware and updates internal flags.
 *
 * Return: 0 on success, non-zero on failure.
 */
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	int err;

	/* make sure the status was cleared the last time we reset the uc */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	err = i915_inject_probe_error(gt->i915, -ENOEXEC);
	if (err)
		return err;

	if (!intel_uc_fw_is_loadable(uc_fw))
		return -ENOEXEC;

	/* Call custom loader */
	uc_fw_bind_ggtt(uc_fw);
	err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
	uc_fw_unbind_ggtt(uc_fw);
	if (err)
		goto fail;

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
	return 0;

fail:
	i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 err);
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
	return err;
}

int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
{
	int err;

	/* this should happen before the load! */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	if (!intel_uc_fw_is_available(uc_fw))
		return -ENOEXEC;

	err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
	if (err) {
		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
	}

	return err;
}

void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
	if (i915_gem_object_has_pinned_pages(uc_fw->obj))
		i915_gem_object_unpin_pages(uc_fw->obj);

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
}

/**
 * intel_uc_fw_cleanup_fetch - cleanup uC firmware
 * @uc_fw: uC firmware
 *
 * Cleans up uC firmware by releasing the firmware GEM obj.
 */
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
{
	if (!intel_uc_fw_is_available(uc_fw))
		return;

	i915_gem_object_put(fetch_and_zero(&uc_fw->obj));

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
}

/**
 * intel_uc_fw_copy_rsa - copy fw RSA to buffer
 *
 * @uc_fw: uC firmware
 * @dst: dst buffer
 * @max_len: max number of bytes to copy
 *
 * Return: number of copied bytes.
 */
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
{
	struct intel_memory_region *mr = uc_fw->obj->mm.region;
	u32 size = min_t(u32, uc_fw->rsa_size, max_len);
	u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
	struct sgt_iter iter;
	size_t count = 0;
	int idx;

	/* Called during reset handling, must be atomic [no fs_reclaim] */
	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));

	idx = offset >> PAGE_SHIFT;
	offset = offset_in_page(offset);
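	/*
	 * Walk the backing pages, skipping idx whole pages to reach the one
	 * containing the start of the RSA blob, then copy page by page (via
	 * atomic kmap or WC io-mapping, depending on where the object lives)
	 * until size bytes have been copied.
	 */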
	if (i915_gem_object_has_struct_page(uc_fw->obj)) {
		struct page *page;

		for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void *vaddr;

			if (idx > 0) {
				idx--;
				continue;
			}

			vaddr = kmap_atomic(page);
			memcpy(dst, vaddr + offset, len);
			kunmap_atomic(vaddr);

			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	} else {
		dma_addr_t addr;

		for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void __iomem *vaddr;

			if (idx > 0) {
				idx--;
				continue;
			}

			vaddr = io_mapping_map_atomic_wc(&mr->iomap,
							 addr - mr->region.start);
			memcpy_fromio(dst, vaddr + offset, len);
			io_mapping_unmap_atomic(vaddr);

			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	}

	return count;
}

/**
 * intel_uc_fw_dump - dump information about uC firmware
 * @uc_fw: uC firmware
 * @p: the &drm_printer
 *
 * Pretty printer for uC firmware.
 */
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
	drm_printf(p, "%s firmware: %s\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
	drm_printf(p, "\tstatus: %s\n",
		   intel_uc_fw_status_repr(uc_fw->status));
	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
		   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
		   uc_fw->major_ver_found, uc_fw->minor_ver_found);
	drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
	drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
}