/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "umc_v6_1.h"
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu.h"

#include "rsmu/rsmu_0_0_2_offset.h"
#include "rsmu/rsmu_0_0_2_sh_mask.h"
#include "umc/umc_6_1_1_offset.h"
#include "umc/umc_6_1_1_sh_mask.h"
#include "umc/umc_6_1_2_offset.h"

#define UMC_6_INST_DIST	0x40000

const uint32_t
	umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = {
		{2, 18, 11, 27},	{4, 20, 13, 29},
		{1, 17, 8, 24},		{7, 23, 14, 30},
		{10, 26, 3, 19},	{12, 28, 5, 21},
		{9, 25, 0, 16},		{15, 31, 6, 22}
};

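/*
 * While RSMU UMC index mode is enabled, UMC register accesses are steered
 * through the RSMU index register rather than fixed per-instance offsets.
 * The RAS routines below therefore switch it off before addressing each
 * channel by its computed offset and restore the previous state when done.
 */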
static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev)
{
	uint32_t rsmu_umc_addr, rsmu_umc_val;

	rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
			mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
	rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);

	rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
			RSMU_UMC_INDEX_MODE_EN, 1);

	WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
}

static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
{
	uint32_t rsmu_umc_addr, rsmu_umc_val;

	rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
			mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
	rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);

	rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
			RSMU_UMC_INDEX_MODE_EN, 0);

	WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
}

static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev)
{
	uint32_t rsmu_umc_addr, rsmu_umc_val;

	rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
			mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
	rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);

	return REG_GET_FIELD(rsmu_umc_val,
			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
			RSMU_UMC_INDEX_MODE_EN);
}

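/*
 * Compute the register offset for one channel: channels within a UMC
 * instance are adev->umc.channel_offs apart, and UMC instances are
 * UMC_6_INST_DIST apart. Offsets are in dwords, hence the "* 4" byte
 * conversion at each register access below.
 */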
static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
					    uint32_t umc_inst,
					    uint32_t ch_inst)
{
	return adev->umc.channel_offs * ch_inst + UMC_6_INST_DIST * umc_inst;
}

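/*
 * Reset one channel's ECC error counters: select each chip in turn and
 * re-arm its counter with UMC_V6_1_CE_CNT_INIT.
 */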
static void umc_v6_1_clear_error_count_per_channel(struct amdgpu_device *adev,
					uint32_t umc_reg_offset)
{
	uint32_t ecc_err_cnt_addr;
	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;

	if (adev->asic_type == CHIP_ARCTURUS) {
		/* UMC 6_1_2 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0,
					mmUMCCH0_0_EccErrCntSel_ARCT);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0,
					mmUMCCH0_0_EccErrCnt_ARCT);
	} else {
		/* UMC 6_1_1 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0,
					mmUMCCH0_0_EccErrCntSel);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0,
					mmUMCCH0_0_EccErrCnt);
	}

	/* select the lower chip */
	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
					umc_reg_offset) * 4);
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
					UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 0);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
			ecc_err_cnt_sel);

	/* clear lower chip error count */
	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V6_1_CE_CNT_INIT);

	/* select the higher chip */
	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
					umc_reg_offset) * 4);
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
					UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 1);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
			ecc_err_cnt_sel);

	/* clear higher chip error count */
	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V6_1_CE_CNT_INIT);
}

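/*
 * Walk every channel of every UMC instance and reset its ECC error
 * counters, toggling RSMU index mode off and back on around the walk
 * if it was enabled on entry.
 */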
static void umc_v6_1_clear_error_count(struct amdgpu_device *adev)
{
	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;
	uint32_t umc_reg_offset = 0;
	uint32_t rsmu_umc_index_state =
				umc_v6_1_get_umc_index_mode_state(adev);

	if (rsmu_umc_index_state)
		umc_v6_1_disable_umc_index_mode(adev);

	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
		umc_reg_offset = get_umc_6_reg_offset(adev,
						umc_inst,
						ch_inst);

		umc_v6_1_clear_error_count_per_channel(adev,
						umc_reg_offset);
	}

	if (rsmu_umc_index_state)
		umc_v6_1_enable_umc_index_mode(adev);
}

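/*
 * Accumulate the correctable (CE) error count for one channel: read the
 * hardware counter for each chip select, subtract the UMC_V6_1_CE_CNT_INIT
 * bias preloaded by umc_v6_1_err_cnt_init(), and count one more error if
 * the MCA status reports an SRAM correctable error (ErrorCodeExt == 6 with
 * Val and CECC set).
 */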
static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
						   uint32_t umc_reg_offset,
						   unsigned long *error_count)
{
	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
	uint32_t ecc_err_cnt, ecc_err_cnt_addr;
	uint64_t mc_umc_status;
	uint32_t mc_umc_status_addr;

	if (adev->asic_type == CHIP_ARCTURUS) {
		/* UMC 6_1_2 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
	} else {
		/* UMC 6_1_1 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
	}

	/* select the lower chip and check the error count */
	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 0);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);

	ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
	*error_count +=
		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
		 UMC_V6_1_CE_CNT_INIT);

	/* select the higher chip and check the error count */
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 1);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);

	ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
	*error_count +=
		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
		 UMC_V6_1_CE_CNT_INIT);

	/* check for SRAM correctable error;
	 * MCUMC_STATUS is a 64-bit register
	 */
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
		*error_count += 1;
}

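/*
 * Count an uncorrectable (UE) error for one channel if the MCA status
 * register reports a valid Deferred, UECC, PCC, UC, or TCC condition.
 */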
static void umc_v6_1_query_uncorrectable_error_count(struct amdgpu_device *adev,
						     uint32_t umc_reg_offset,
						     unsigned long *error_count)
{
	uint64_t mc_umc_status;
	uint32_t mc_umc_status_addr;

	if (adev->asic_type == CHIP_ARCTURUS) {
		/* UMC 6_1_2 registers */
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
	} else {
		/* UMC 6_1_1 registers */
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
	}

	/* check the MCUMC_STATUS */
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
	if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
		*error_count += 1;
}

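/*
 * Top-level RAS count query: walk every UMC instance and channel,
 * accumulating CE and UE counts into ras_error_status. On Arcturus,
 * DF C-state is disallowed around the register accesses. The hardware
 * counters are cleared once the totals have been harvested.
 */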
static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
					   void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;
	uint32_t umc_reg_offset = 0;

	uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);

	if (rsmu_umc_index_state)
		umc_v6_1_disable_umc_index_mode(adev);

	if ((adev->asic_type == CHIP_ARCTURUS) &&
	    amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
		DRM_WARN("Failed to disable DF-Cstate.\n");

	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
		umc_reg_offset = get_umc_6_reg_offset(adev,
						      umc_inst,
						      ch_inst);

		umc_v6_1_query_correctable_error_count(adev,
						       umc_reg_offset,
						       &(err_data->ce_count));
		umc_v6_1_query_uncorrectable_error_count(adev,
							 umc_reg_offset,
							 &(err_data->ue_count));
	}

	if ((adev->asic_type == CHIP_ARCTURUS) &&
	    amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
		DRM_WARN("Failed to enable DF-Cstate.\n");

	if (rsmu_umc_index_state)
		umc_v6_1_enable_umc_index_mode(adev);

	umc_v6_1_clear_error_count(adev);
}

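/*
 * Translate a channel-reported error address into a retirable SoC
 * physical page. Only UE addresses are recorded in err_data; the MCA
 * status register is cleared afterwards so the next error can be latched.
 */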
static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
					 struct ras_err_data *err_data,
					 uint32_t umc_reg_offset,
					 uint32_t ch_inst,
					 uint32_t umc_inst)
{
	uint32_t lsb, mc_umc_status_addr;
	uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
	struct eeprom_table_record *err_rec;
	uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];

	if (adev->asic_type == CHIP_ARCTURUS) {
		/* UMC 6_1_2 registers */
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
		mc_umc_addrt0 =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT);
	} else {
		/* UMC 6_1_1 registers */
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
		mc_umc_addrt0 =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
	}

	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);

	if (mc_umc_status == 0)
		return;

	if (!err_data->err_addr) {
		/* clear umc status */
		WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
		return;
	}

	err_rec = &err_data->err_addr[err_data->err_addr_cnt];

	/* calculate error address if ue/ce error is detected */
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {

		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
		/* the lowest lsb bits should be ignored */
		lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB);
		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
		err_addr &= ~((0x1ULL << lsb) - 1);

		/* translate umc channel address to soc pa, 3 parts are included */
		retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
				ADDR_OF_256B_BLOCK(channel_index) |
				OFFSET_IN_256B_BLOCK(err_addr);

		/* we only save ue error information currently, ce is skipped */
		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
				== 1) {
			err_rec->address = err_addr;
			/* page frame address is saved */
			err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
			err_rec->ts = (uint64_t)ktime_get_real_seconds();
			err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
			err_rec->cu = 0;
			err_rec->mem_channel = channel_index;
			err_rec->mcumc_id = umc_inst;

			err_data->err_addr_cnt++;
		}
	}

	/* clear umc status */
	WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
}

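/*
 * Top-level RAS address query: walk every UMC instance and channel and
 * record retired-page information for each latched error, handling DF
 * C-state and RSMU index mode the same way as the count query above.
 */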
static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;
	uint32_t umc_reg_offset = 0;

	uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);

	if (rsmu_umc_index_state)
		umc_v6_1_disable_umc_index_mode(adev);

	if ((adev->asic_type == CHIP_ARCTURUS) &&
	    amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
		DRM_WARN("Failed to disable DF-Cstate.\n");

	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
		umc_reg_offset = get_umc_6_reg_offset(adev,
						      umc_inst,
						      ch_inst);

		umc_v6_1_query_error_address(adev,
					     err_data,
					     umc_reg_offset,
					     ch_inst,
					     umc_inst);
	}

	if ((adev->asic_type == CHIP_ARCTURUS) &&
	    amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
		DRM_WARN("Failed to enable DF-Cstate.\n");

	if (rsmu_umc_index_state)
		umc_v6_1_enable_umc_index_mode(adev);
}

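/*
 * Program one channel for error counting: for each chip select, route the
 * correctable-error interrupt to the APIC and preload the counter with
 * UMC_V6_1_CE_CNT_INIT so later queries can subtract the bias.
 */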
static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
					      uint32_t umc_reg_offset)
{
	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
	uint32_t ecc_err_cnt_addr;

	if (adev->asic_type == CHIP_ARCTURUS) {
		/* UMC 6_1_2 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
	} else {
		/* UMC 6_1_1 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
	}

	/* select the lower chip */
	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 0);
	/* set ce error interrupt type to APIC based interrupt */
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrInt, 0x1);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
	/* set error count to initial value */
	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);

	/* select the higher chip */
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 1);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
}

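/*
 * Initialize error counting on every channel of every UMC instance,
 * leaving RSMU index mode in the state it was found.
 */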
static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
{
	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;
	uint32_t umc_reg_offset = 0;

	uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);

	if (rsmu_umc_index_state)
		umc_v6_1_disable_umc_index_mode(adev);

	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
		umc_reg_offset = get_umc_6_reg_offset(adev,
						      umc_inst,
						      ch_inst);

		umc_v6_1_err_cnt_init_per_channel(adev, umc_reg_offset);
	}

	if (rsmu_umc_index_state)
		umc_v6_1_enable_umc_index_mode(adev);
}

const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs = {
	.err_cnt_init = umc_v6_1_err_cnt_init,
	.ras_late_init = amdgpu_umc_ras_late_init,
	.ras_fini = amdgpu_umc_ras_fini,
	.query_ras_error_count = umc_v6_1_query_ras_error_count,
	.query_ras_error_address = umc_v6_1_query_ras_error_address,
};