/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

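/*
 * Mailbox helpers for the AI (Vega10) SR-IOV guest (VF) driver.
 *
 * The VF exchanges messages with the host (PF) through the BIF_BX_PF0
 * mailbox registers: outgoing requests are written to MSGBUF_TRN_DW0..DW3
 * and latched by raising TRN_MSG_VALID, while host replies arrive in
 * MSGBUF_RCV_DW0..DW2 and are acknowledged through the RCV control byte.
 */
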
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from an IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If called outside an IRQ routine, peek_msg is not guaranteed to return the
 * correct value, since it reads RCV_DW0 without checking whether
 * RCV_MSG_VALID has been set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

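/*
 * Busy-wait until the host raises TRN_MSG_ACK (bit 1 of the TRN control
 * byte) for the message we just sent, giving up after
 * AI_MAILBOX_POLL_ACK_TIMEDOUT msec.
 */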
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

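/*
 * Send one request to the host: make sure the previous TRN_MSG_ACK has been
 * cleared, write the request and its payload to TRN_DW0..DW3, raise
 * TRN_MSG_VALID, then poll for the host's ack.
 */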
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; with the host's
	 * RCV_MSG_ACK cleared, the hardware automatically clears the VF's
	 * TRN_MSG_ACK. Otherwise the xgpu_ai_poll_ack() below would return
	 * immediately on a stale ack.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again !\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

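/*
 * Issue an access request and, for the init/fini/reset requests, wait for
 * the host to answer with IDH_READY_TO_ACCESS_GPU before proceeding.
 */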
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* start to check msg if request is idh_req_gpu_init_access */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

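/*
 * Deferred handler for a host-initiated FLR (function level reset): hold off
 * GPU recovery until the host signals IDH_FLR_NOTIFICATION_CMPL, then
 * trigger recovery for the world-switch failure if no per-job TDR is armed.
 */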
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
	int locked;

	/* block amdgpu_gpu_recover till msg FLR COMPLETE received,
	 * otherwise the mailbox msg will be ruined/reset by
	 * the VF FLR.
	 *
	 * we can unlock the lock_reset to allow "amdgpu_job_timedout"
	 * to run gpu_recover() after FLR_NOTIFICATION_CMPL is received,
	 * which means the host side has finished this VF's FLR.
	 */
	locked = mutex_trylock(&adev->lock_reset);
	if (locked)
		adev->in_gpu_reset = 1;

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	if (locked) {
		adev->in_gpu_reset = 0;
		mutex_unlock(&adev->lock_reset);
	}

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& amdgpu_lockup_timeout == MAX_SCHEDULE_TIMEOUT)
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

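/*
 * Top half for the mailbox "message valid" interrupt: peek at RCV_DW0 and
 * defer FLR handling to the flr_work item; other messages are consumed by
 * the polling paths.
 */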
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	/* READY_TO_ACCESS_GPU is fetched by the kernel's polling thread, so
	 * the IRQ can ignore it here; other messages such as FLR complete are
	 * not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

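/*
 * Virtualization hooks used by the amdgpu_virt layer when running as an
 * SR-IOV guest: full-GPU access acquisition/release, VF reset requests and
 * raw mailbox message transmission all go through this table.
 */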
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu	= xgpu_ai_request_full_gpu_access,
	.rel_full_gpu	= xgpu_ai_release_full_gpu_access,
	.reset_gpu	= xgpu_ai_request_reset,
	.wait_reset	= NULL,
	.trans_msg	= xgpu_ai_mailbox_trans_msg,
};