/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "mmhub_v1_7.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"
#include "umc_v6_7.h"
#include "hdp_v4_0.h"
#include "mca_v3_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2

static const char *gfxhub_client_ids[] = {
	"CB",
	"DB",
	"IA",
	"WD",
	"CPF",
	"CPC",
	"CPG",
	"RLC",
	"TCP",
	"SQC (inst)",
	"SQC (data)",
	"SQG",
	"PA",
};

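/*
 * The mmhub client ID tables below are indexed as [client_id][rw], where
 * rw is the RW field of VM_L2_PROTECTION_FAULT_STATUS (0 = read fault,
 * 1 = write fault).
 */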
static const char *mmhub_client_ids_raven[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "VCN",
	[3][0] = "VCNU",
	[4][0] = "HDP",
	[5][0] = "DCE",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "VCN",
	[3][1] = "VCNU",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCE",
	[8][1] = "DCEDWB0",
	[9][1] = "DCEDWB1",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
};

static const char *mmhub_client_ids_renoir[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "HDP",
	[4][0] = "DCEDMC",
	[5][0] = "DCEVGA",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[28][0] = "VCN",
	[29][0] = "VCNU",
	[30][0] = "JPEG",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "HDP",
	[3][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCEDMC",
	[8][1] = "DCEVGA",
	[9][1] = "DCEDWB",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
	[28][1] = "VCN",
	[29][1] = "VCNU",
	[30][1] = "JPEG",
};

static const char *mmhub_client_ids_vega10[][2] = {
	[0][0] = "MP0",
	[1][0] = "UVD",
	[2][0] = "UVDU",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "VCE0",
	[32+1][0] = "VCE0U",
	[32+2][0] = "XDMA",
	[32+3][0] = "DCE",
	[32+4][0] = "MP1",
	[32+14][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "UVD",
	[2][1] = "UVDU",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "VCE0",
	[32+1][1] = "VCE0U",
	[32+2][1] = "XDMA",
	[32+3][1] = "DCE",
	[32+4][1] = "DCEDWB",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega12[][2] = {
	[0][0] = "MP0",
	[1][0] = "VCE0",
	[2][0] = "VCE0U",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "DCE",
	[32+1][0] = "XDMA",
	[32+2][0] = "UVD",
	[32+3][0] = "UVDU",
	[32+4][0] = "MP1",
	[32+15][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "VCE0",
	[2][1] = "VCE0U",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "DCE",
	[32+1][1] = "DCEDWB",
	[32+2][1] = "XDMA",
	[32+3][1] = "UVD",
	[32+4][1] = "UVDU",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+15][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega20[][2] = {
	[0][0] = "XDMA",
	[1][0] = "DCE",
	[2][0] = "VCE0",
	[3][0] = "VCE0U",
	[4][0] = "UVD",
	[5][0] = "UVD1U",
	[13][0] = "OSS",
	[14][0] = "HDP",
	[15][0] = "SDMA0",
	[32+0][0] = "UVD",
	[32+1][0] = "UVDU",
	[32+2][0] = "MP1",
	[32+3][0] = "MP0",
	[32+12][0] = "UTCL2",
	[32+14][0] = "SDMA1",
	[0][1] = "XDMA",
	[1][1] = "DCE",
	[2][1] = "DCEDWB",
	[3][1] = "VCE0",
	[4][1] = "VCE0U",
	[5][1] = "UVD1",
	[6][1] = "UVD1U",
	[7][1] = "DBGU0",
	[8][1] = "XDP",
	[13][1] = "OSS",
	[14][1] = "HDP",
	[15][1] = "SDMA0",
	[32+0][1] = "UVD",
	[32+1][1] = "UVDU",
	[32+2][1] = "DBGU1",
	[32+3][1] = "MP1",
	[32+4][1] = "MP0",
	[32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_arcturus[][2] = {
	[0][0] = "DBGU1",
	[1][0] = "XDP",
	[2][0] = "MP1",
	[14][0] = "HDP",
	[171][0] = "JPEG",
	[172][0] = "VCN",
	[173][0] = "VCNU",
	[203][0] = "JPEG1",
	[204][0] = "VCN1",
	[205][0] = "VCN1U",
	[256][0] = "SDMA0",
	[257][0] = "SDMA1",
	[258][0] = "SDMA2",
	[259][0] = "SDMA3",
	[260][0] = "SDMA4",
	[261][0] = "SDMA5",
	[262][0] = "SDMA6",
	[263][0] = "SDMA7",
	[384][0] = "OSS",
	[0][1] = "DBGU1",
	[1][1] = "XDP",
	[2][1] = "MP1",
	[14][1] = "HDP",
	[171][1] = "JPEG",
	[172][1] = "VCN",
	[173][1] = "VCNU",
	[203][1] = "JPEG1",
	[204][1] = "VCN1",
	[205][1] = "VCN1U",
	[256][1] = "SDMA0",
	[257][1] = "SDMA1",
	[258][1] = "SDMA2",
	[259][1] = "SDMA3",
	[260][1] = "SDMA4",
	[261][1] = "SDMA5",
	[262][1] = "SDMA6",
	[263][1] = "SDMA7",
	[384][1] = "OSS",
};

static const char *mmhub_client_ids_aldebaran[][2] = {
	[2][0] = "MP1",
	[3][0] = "MP0",
	[32+1][0] = "DBGU_IO0",
	[32+2][0] = "DBGU_IO2",
	[32+4][0] = "MPIO",
	[96+11][0] = "JPEG0",
	[96+12][0] = "VCN0",
	[96+13][0] = "VCNU0",
	[128+11][0] = "JPEG1",
	[128+12][0] = "VCN1",
	[128+13][0] = "VCNU1",
	[160+1][0] = "XDP",
	[160+14][0] = "HDP",
	[256+0][0] = "SDMA0",
	[256+1][0] = "SDMA1",
	[256+2][0] = "SDMA2",
	[256+3][0] = "SDMA3",
	[256+4][0] = "SDMA4",
	[384+0][0] = "OSS",
	[2][1] = "MP1",
	[3][1] = "MP0",
	[32+1][1] = "DBGU_IO0",
	[32+2][1] = "DBGU_IO2",
	[32+4][1] = "MPIO",
	[96+11][1] = "JPEG0",
	[96+12][1] = "VCN0",
	[96+13][1] = "VCNU0",
	[128+11][1] = "JPEG1",
	[128+12][1] = "VCN1",
	[128+13][1] = "VCNU1",
	[160+1][1] = "XDP",
	[160+14][1] = "HDP",
	[256+0][1] = "SDMA0",
	[256+1][1] = "SDMA1",
	[256+2][1] = "SDMA2",
	[256+3][1] = "SDMA3",
	[256+4][1] = "SDMA4",
	[384+0][1] = "OSS",
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

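/*
 * Raw offsets of the per-channel UMC ECC control registers and, below,
 * their mask counterparts: eight UMC instances with four channel
 * instances each.
 */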
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	/* Devices newer than VEGA10/12 shall have these programming
	   sequences performed by PSP BL */
	if (adev->asic_type >= CHIP_VEGA20)
		return 0;

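	/* Bits 6:0 of each UMC ECC control register gate ECC interrupt
	 * generation: set them to enable the interrupt, clear to disable.
	 */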
	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	uint32_t status = 0, cid = 0, rw = 0;
	struct amdgpu_task_info task_info;
	struct amdgpu_vmhub *hub;
	const char *mmhub_cid;
	const char *hub_name;
	u64 addr;

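	/* Reassemble the faulting page address: src_data[0] carries bits
	 * 43:12 and the low nibble of src_data[1] carries bits 47:44.
	 */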
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
			return 1;
	}

	if (!printk_ratelimit())
		return 0;

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		hub_name = "mmhub0";
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		hub_name = "mmhub1";
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		hub_name = "gfxhub0";
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
		"pasid:%u, for process %s pid %d thread %s pid %d)\n",
		hub_name, retry_fault ? "retry" : "no-retry",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (amdgpu_sriov_vf(adev))
		return 0;

	/*
	 * Issue a dummy read to wait for the status register to
	 * be updated to avoid reading an incorrect value due to
	 * the new fast GRBM interface.
	 */
	if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
	    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
		RREG32(hub->vm_l2_pro_fault_status);

	status = RREG32(hub->vm_l2_pro_fault_status);
	cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
	WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

	dev_err(adev->dev,
		"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
			gfxhub_client_ids[cid],
			cid);
	} else {
		switch (adev->ip_versions[MMHUB_HWIP][0]) {
		case IP_VERSION(9, 0, 0):
			mmhub_cid = mmhub_client_ids_vega10[cid][rw];
			break;
		case IP_VERSION(9, 3, 0):
			mmhub_cid = mmhub_client_ids_vega12[cid][rw];
			break;
		case IP_VERSION(9, 4, 0):
			mmhub_cid = mmhub_client_ids_vega20[cid][rw];
			break;
		case IP_VERSION(9, 4, 1):
			mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
			break;
		case IP_VERSION(9, 1, 0):
		case IP_VERSION(9, 2, 0):
			mmhub_cid = mmhub_client_ids_raven[cid][rw];
			break;
		case IP_VERSION(1, 5, 0):
		case IP_VERSION(2, 4, 0):
			mmhub_cid = mmhub_client_ids_renoir[cid][rw];
			break;
		case IP_VERSION(9, 4, 2):
			mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
			break;
		default:
			mmhub_cid = NULL;
			break;
		}
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			mmhub_cid ? mmhub_cid : "unknown", cid);
	}
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
	}
}

static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					    uint32_t flush_type)
{
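	/* Build a VM_INVALIDATE_ENG*_REQ word that flushes every page-table
	 * level (L2 PTEs, PDE0/1/2 and L1 PTEs) for the given VMID.
	 */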
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/**
 * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					      uint32_t vmhub)
{
	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
		return false;

	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)) &&
		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
}

static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
						     uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		       + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
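	/* Engine 17 is used for these driver-initiated invalidations;
	 * rings get their own engines via amdgpu_gmc_allocate_vm_inv_eng().
	 */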
	const unsigned eng = 17;
	u32 j, inv_req, inv_req2, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	if (adev->gmc.xgmi.num_physical_nodes &&
	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	} else {
		inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
		inv_req2 = 0;
	}

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_sem)) {
		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		up_read(&adev->reset_sem);
		return;
	}

	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * The GPU may lose the gpuvm invalidate acknowledge state across a
	 * power-gating off cycle. To work around the issue, acquire the
	 * semaphore before invalidation and release it afterwards, avoiding
	 * entry into a power-gated state in between.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
					    hub->eng_distance * eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	do {
		WREG32_NO_KIQ(hub->vm_inv_eng0_req +
			      hub->eng_distance * eng, inv_req);

		/*
		 * Issue a dummy read to wait for the ACK register to
		 * be cleared to avoid a false ACK due to the new fast
		 * GRBM interface.
		 */
		if ((vmhub == AMDGPU_GFXHUB_0) &&
		    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
			RREG32_NO_KIQ(hub->vm_inv_eng0_req +
				      hub->eng_distance * eng);

		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}

		inv_req = inv_req2;
		inv_req2 = 0;
	} while (inv_req);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
			      hub->eng_distance * eng, 0);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_in_reset(adev))
		return -EIO;

	if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
				       adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0));
		/* 2 dwords flush + 8 dwords fence */
		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;

		if (vega20_xgmi_wa)
			ndw += kiq->pmf->invalidate_tlbs_size;

		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, ndw);
		if (vega20_xgmi_wa)
			kiq->pmf->kiq_invalidate_tlbs(ring,
						      pasid, 2, all_hub);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					      pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			up_read(&adev->reset_sem);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			up_read(&adev->reset_sem);
			return -ETIME;
		}
		up_read(&adev->reset_sem);
		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {

		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
							       &queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v9_0_flush_gpu_tlb(adev, vmid,
							       i, flush_type);
			} else {
				gmc_v9_0_flush_gpu_tlb(adev, vmid,
						       AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPU may lose the gpuvm invalidate acknowledge state across a
	 * power-gating off cycle. To work around the issue, acquire the
	 * semaphore before invalidation and release it afterwards, avoiding
	 * entry into a power-gated state in between.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
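
/*
 * Example (illustrative only): with the encoding above, a valid, snooped
 * PTE for a 4K system page combines the page's physical address with
 * AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED plus an mtype
 * such as AMDGPU_PTE_MTYPE_VG10(MTYPE_NC).
 */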

static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	}
}

static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
	     adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) &&
	    !(*flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		*flags |= AMDGPU_PTE_SNOOPED;

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
		*flags |= mapping->flags & AMDGPU_PTE_SNOOPED;
}

static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;

		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 1, 0):
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case IP_VERSION(6, 1, 1):
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
		break;
	case IP_VERSION(6, 1, 2):
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
		break;
	case IP_VERSION(6, 7, 0):
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->umc.ras_funcs = &umc_v6_7_ras_funcs;
		if (1 & adev->smuio.funcs->get_die_id(adev))
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(9, 4, 1):
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	case IP_VERSION(9, 4, 2):
		adev->mmhub.funcs = &mmhub_v1_7_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	}
}

static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(9, 4, 0):
		adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs;
		break;
	case IP_VERSION(9, 4, 1):
		adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs;
		break;
	case IP_VERSION(9, 4, 2):
		adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs;
		break;
	default:
		/* mmhub ras is not available */
		break;
	}
}

static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
}

static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
{
	adev->hdp.ras_funcs = &hdp_v4_0_ras_funcs;
}

static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
{
	/* is UMC the right IP to check for MCA? Maybe DF? */
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(6, 7, 0):
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->mca.funcs = &mca_v3_0_funcs;
		break;
	default:
		break;
	}
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */
	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS)
		adev->gmc.xgmi.supported = true;

	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
		adev->gmc.xgmi.supported = true;
		adev->gmc.xgmi.connected_to_cpu =
			adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
	}

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);
	gmc_v9_0_set_mmhub_ras_funcs(adev);
	gmc_v9_0_set_gfxhub_funcs(adev);
	gmc_v9_0_set_hdp_ras_funcs(adev);
	gmc_v9_0_set_mca_funcs(adev);

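	/* Carve out two 4 GB apertures high in the GPU virtual address
	 * space, used by the HSA memory model for the shared (SVM) and
	 * private memory segments.
	 */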
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	/*
	 * Work around a performance drop issue observed when the VBIOS
	 * enables partial writes while disabling HBM ECC for Vega10.
	 */
	if (!amdgpu_sriov_vf(adev) &&
	    (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
		if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
			if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
		}
	}

	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
		if (adev->mmhub.ras_funcs &&
		    adev->mmhub.ras_funcs->reset_ras_error_count)
			adev->mmhub.ras_funcs->reset_ras_error_count(adev);

		if (adev->hdp.ras_funcs &&
		    adev->hdp.ras_funcs->reset_ras_error_count)
			adev->hdp.ras_funcs->reset_ras_error_count(adev);
	}

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = adev->mmhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	if (adev->gmc.xgmi.connected_to_cpu) {
		amdgpu_gmc_sysvm_location(adev, mc);
	} else {
		amdgpu_gmc_vram_location(adev, mc, base);
		amdgpu_gmc_gart_location(adev, mc);
		amdgpu_gmc_agp_location(adev, mc);
	}
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the VRAM size in MB; convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/*
	 * An AMD Accelerated Processing Platform (APP) supporting a GPU-HOST
	 * XGMI interface can use VRAM through here, as it appears as system
	 * reserved memory in the host address space.
	 *
	 * For APUs, VRAM is just the stolen system memory and can be accessed
	 * directly.
	 *
	 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
	 */

	/* check whether both host-gpu and gpu-gpu xgmi links exist */
	if ((adev->flags & AMD_IS_APU) ||
	    (adev->gmc.xgmi.supported &&
	     adev->gmc.xgmi.connected_to_cpu)) {
		adev->gmc.aper_base =
			adev->gfxhub.funcs->get_mc_fb_offset(adev) +
			adev->gmc.xgmi.physical_node_id *
			adev->gmc.xgmi.node_segment_size;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}

#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->ip_versions[GC_HWIP][0]) {
		case IP_VERSION(9, 0, 1):  /* all engines support GPUVM */
		case IP_VERSION(9, 2, 1):  /* all engines support GPUVM */
		case IP_VERSION(9, 4, 0):
		case IP_VERSION(9, 4, 1):
		case IP_VERSION(9, 4, 2):
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case IP_VERSION(9, 1, 0):   /* DCE SG support */
		case IP_VERSION(9, 2, 2):   /* DCE SG support */
		case IP_VERSION(9, 3, 0):
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}

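	/* With an XGMI link to the CPU, VMID0 gets a two-level layout
	 * (PDB0 plus page-table blocks) so PDB0 can map all of VRAM in
	 * addition to the GART region; otherwise a flat one-level GART
	 * page table suffices.
	 */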
	if (adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.vmid0_page_table_depth = 1;
		adev->gmc.vmid0_page_table_block_size = 12;
	} else {
		adev->gmc.vmid0_page_table_depth = 0;
		adev->gmc.vmid0_page_table_block_size = 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	r = amdgpu_gart_table_vram_alloc(adev);
	if (r)
		return r;

	if (adev->gmc.xgmi.connected_to_cpu) {
		r = amdgpu_gmc_pdb0_alloc(adev);
	}

	return r;
}

/**
 * gmc_v9_0_save_registers - saves regs
 *
 * @adev: amdgpu_device pointer
 *
 * This saves potential register values that should be
 * restored upon resume
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
	if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1)))
		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);
	if (adev->mca.funcs)
		adev->mca.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
		&vram_width, &vram_type, &vram_vendor);
	if (amdgpu_sriov_vf(adev))
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on
		 * Raven, and the DF related registers are not readable;
		 * hardcoding seems to be the only way to set the correct
		 * vram_width.
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df.funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 2):
		adev->num_vmhubs = 2;

		/*
		 * To fulfill 4-level page support we use a VM size of 256TB
		 * (48 bit), the maximum size of Vega10, with a block size of
		 * 512 (9 bit).
		 */
		/* sriov restricts max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case IP_VERSION(9, 4, 1):
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is for VMC page faults. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
				      &adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = adev->gfxhub.funcs->get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1..n-1
	 * amdkfd will use VMIDs n..15
	 *
	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
	 * for video processing.
	 */
	adev->vm_manager.first_kfd_vmid =
		(adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
		 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) ? 3 : 8;

	amdgpu_vm_manager_init(adev);

	gmc_v9_0_save_registers(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
		if (amdgpu_sriov_vf(adev))
			break;
		fallthrough;
	case IP_VERSION(9, 4, 0):
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 0):
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_restore_registers - restores regs
 *
 * @adev: amdgpu_device pointer
 *
 * This restores register values, saved at suspend.
 */
void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
	if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) {
		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
		WARN_ON(adev->gmc.sdpif_register !=
			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;

	if (adev->gmc.xgmi.connected_to_cpu)
		amdgpu_gmc_init_pdb0(adev);

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	DRM_INFO("PCIE GART of %uM enabled.\n",
		 (unsigned)(adev->gmc.gart_size >> 20));
	if (adev->gmc.pdb0_bo)
		DRM_INFO("PDB0 located at 0x%016llX\n",
			 (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
	DRM_INFO("PTB located at 0x%016llX\n",
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;
	int r, i;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, true);

	adev->hdp.funcs->init_registers(adev);

	/* After HDP is initialized, flush HDP. */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gfxhub.funcs->set_fault_enable_default(adev, value);
		adev->mmhub.funcs->set_fault_enable_default(adev, value);
	}
	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	/*
	 * Pair the operations done in gmc_v9_0_hw_init to maintain a correct
	 * cached state for GMC. Otherwise, re-gating on S3 resume will fail
	 * due to a wrong cached state.
	 */
	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, false);

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};