/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

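/*
 * SR-IOV VF <-> PF mailbox for Vega10 (AI).
 *
 * The VF exchanges messages with the host through the
 * BIF_BX_PF0_MAILBOX_* registers: four MSGBUF_TRN_DW* words carry an
 * outgoing request plus data, four MSGBUF_RCV_DW* words carry the host's
 * reply, and the AI_MAIBOX_CONTROL_*_OFFSET_BYTE control bytes hold the
 * VALID (bit 0) and ACK (bit 1) handshake bits for each direction.
 */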
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	/* Set RCV_MSG_ACK (bit 1) to acknowledge the current message from the host */
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	/* TRN_MSG_VALID (bit 0) tells the host a message is ready in the TRN buffer */
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since it reads RCV_DW0 without knowing whether
 * RCV_MSG_VALID has been set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	/* TRN_MSG_ACK is bit 1 of the TRN control byte */
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

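/*
 * Busy-wait (in 5 ms steps) until the host acknowledges the message we
 * transmitted, i.e. until TRN_MSG_ACK shows up in the TRN control byte.
 */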
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Didn't get TRN_MSG_ACK from pf within %d ms\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

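/*
 * Poll (in 10 ms steps) until the expected message shows up in the receive
 * buffer; acknowledge it via xgpu_ai_mailbox_rcv_msg() once it does.
 */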
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

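/*
 * Transmit one request to the host: wait for any previous ack to clear,
 * write the request ID and three data words into the TRN message buffer,
 * raise TRN_MSG_VALID, then poll for the host's ack.
 */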
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clearing TRN_MSG_VALID clears the host's RCV_MSG_VALID, and the
	 * hardware then automatically clears the host's RCV_MSG_ACK, which
	 * in turn clears our TRN_MSG_ACK; otherwise the xgpu_ai_poll_ack()
	 * below would return immediately on the stale ack.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not be asserted, waiting again!\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
			    mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continuing anyway\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

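/*
 * Send one access request to the host. For the GPU init/fini/reset access
 * requests, also wait for the host's READY_TO_ACCESS_GPU reply and pick up
 * the fw_reserve checksum key the host leaves in mailbox RCV_DW2.
 */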
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* start to check msg if the request needs full GPU access */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Didn't get READY_TO_ACCESS_GPU from pf, giving up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

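/*
 * Request a VF reset from the host, retrying up to
 * AI_MAILBOX_POLL_MSG_REP_MAX times if the request is not answered.
 */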
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < AI_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

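/* The ack interrupt only confirms delivery; there is nothing to process. */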
static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("got ack intr, nothing to do.\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

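/*
 * Deferred FLR (function level reset) handler, scheduled from the receive
 * interrupt when the host announces an imminent VF FLR. It tells the host
 * we are ready to reset, then waits for FLR completion before letting GPU
 * recovery proceed.
 */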
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;

	/* block amdgpu_gpu_recover till msg FLR COMPLETE is received,
	 * otherwise the mailbox msg will be corrupted/reset by
	 * the VF FLR.
	 */
	if (!down_write_trylock(&adev->reset_sem))
		return;

	amdgpu_virt_fini_data_exchange(adev);
	atomic_set(&adev->in_gpu_reset, 1);

	xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->in_gpu_reset, 0);
	up_write(&adev->reset_sem);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
			adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

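/*
 * Receive interrupt: peek at the incoming message and dispatch it. Only
 * FLR notifications and liveness queries are handled here; everything
 * else is consumed by the polling paths above.
 */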
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	case IDH_QUERY_ALIVE:
		xgpu_ai_mailbox_send_ack(adev);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * safely ignore it here since the polling thread will handle it;
	 * other messages, such as flr complete, are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

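/*
 * Register the two BIF interrupt sources used by the mailbox: source id
 * 135 for incoming messages (rcv) and 138 for transmit acks (ack).
 */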
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

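/*
 * Enable both mailbox interrupts and set up the deferred FLR handler;
 * xgpu_ai_mailbox_put_irq() below undoes the enables.
 */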
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
};