/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

#include "amdgpu_reset.h"

static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since RCV_DW0 only holds a valid message while
 * RCV_MSG_VALID is set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID first to clear the host's RCV_MSG_ACK; with the
	 * host's RCV_MSG_ACK cleared, the hardware automatically clears the
	 * VF's TRN_MSG_ACK.  Otherwise xgpu_ai_poll_ack() below would return
	 * immediately.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again !\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* start to check msg if request is one of the GPU access requests */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	} else if (req == IDH_REQ_GPU_INIT_DATA) {
		/* Dummy REQ_GPU_INIT_DATA handling */
		r = xgpu_ai_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY);
		/* version set to 0 since dummy */
		adev->virt.req_init_data_ver = 0;
	}

	return 0;
}

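/*
 * Request a GPU reset from the host (PF).  The IDH_REQ_GPU_RESET_ACCESS
 * request is retried up to AI_MAILBOX_POLL_MSG_REP_MAX times if the host
 * does not answer with READY_TO_ACCESS_GPU in time.
 */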
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < AI_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;

	/* Block amdgpu_gpu_recover until the FLR COMPLETE message is received,
	 * otherwise the mailbox message will be ruined/reset by the VF FLR.
	 */
	if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
		return;

	down_write(&adev->reset_domain->sem);

	amdgpu_virt_fini_data_exchange(adev);

	xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->reset_domain->in_gpu_reset, 0);
	up_write(&adev->reset_domain->sem);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) {
		struct amdgpu_reset_context reset_context;
		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
								&adev->virt.flr_work),
				  "Failed to queue work! at %s",
at %s", 32462306a36Sopenharmony_ci __func__); 32562306a36Sopenharmony_ci break; 32662306a36Sopenharmony_ci case IDH_QUERY_ALIVE: 32762306a36Sopenharmony_ci xgpu_ai_mailbox_send_ack(adev); 32862306a36Sopenharmony_ci break; 32962306a36Sopenharmony_ci /* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore 33062306a36Sopenharmony_ci * it byfar since that polling thread will handle it, 33162306a36Sopenharmony_ci * other msg like flr complete is not handled here. 33262306a36Sopenharmony_ci */ 33362306a36Sopenharmony_ci case IDH_CLR_MSG_BUF: 33462306a36Sopenharmony_ci case IDH_FLR_NOTIFICATION_CMPL: 33562306a36Sopenharmony_ci case IDH_READY_TO_ACCESS_GPU: 33662306a36Sopenharmony_ci default: 33762306a36Sopenharmony_ci break; 33862306a36Sopenharmony_ci } 33962306a36Sopenharmony_ci 34062306a36Sopenharmony_ci return 0; 34162306a36Sopenharmony_ci} 34262306a36Sopenharmony_ci 34362306a36Sopenharmony_cistatic const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = { 34462306a36Sopenharmony_ci .set = xgpu_ai_set_mailbox_ack_irq, 34562306a36Sopenharmony_ci .process = xgpu_ai_mailbox_ack_irq, 34662306a36Sopenharmony_ci}; 34762306a36Sopenharmony_ci 34862306a36Sopenharmony_cistatic const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = { 34962306a36Sopenharmony_ci .set = xgpu_ai_set_mailbox_rcv_irq, 35062306a36Sopenharmony_ci .process = xgpu_ai_mailbox_rcv_irq, 35162306a36Sopenharmony_ci}; 35262306a36Sopenharmony_ci 35362306a36Sopenharmony_civoid xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev) 35462306a36Sopenharmony_ci{ 35562306a36Sopenharmony_ci adev->virt.ack_irq.num_types = 1; 35662306a36Sopenharmony_ci adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs; 35762306a36Sopenharmony_ci adev->virt.rcv_irq.num_types = 1; 35862306a36Sopenharmony_ci adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs; 35962306a36Sopenharmony_ci} 36062306a36Sopenharmony_ci 36162306a36Sopenharmony_ciint xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev) 36262306a36Sopenharmony_ci{ 36362306a36Sopenharmony_ci int r; 36462306a36Sopenharmony_ci 36562306a36Sopenharmony_ci r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq); 36662306a36Sopenharmony_ci if (r) 36762306a36Sopenharmony_ci return r; 36862306a36Sopenharmony_ci 36962306a36Sopenharmony_ci r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq); 37062306a36Sopenharmony_ci if (r) { 37162306a36Sopenharmony_ci amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); 37262306a36Sopenharmony_ci return r; 37362306a36Sopenharmony_ci } 37462306a36Sopenharmony_ci 37562306a36Sopenharmony_ci return 0; 37662306a36Sopenharmony_ci} 37762306a36Sopenharmony_ci 37862306a36Sopenharmony_ciint xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev) 37962306a36Sopenharmony_ci{ 38062306a36Sopenharmony_ci int r; 38162306a36Sopenharmony_ci 38262306a36Sopenharmony_ci r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0); 38362306a36Sopenharmony_ci if (r) 38462306a36Sopenharmony_ci return r; 38562306a36Sopenharmony_ci r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0); 38662306a36Sopenharmony_ci if (r) { 38762306a36Sopenharmony_ci amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); 38862306a36Sopenharmony_ci return r; 38962306a36Sopenharmony_ci } 39062306a36Sopenharmony_ci 39162306a36Sopenharmony_ci INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work); 39262306a36Sopenharmony_ci 39362306a36Sopenharmony_ci return 0; 39462306a36Sopenharmony_ci} 39562306a36Sopenharmony_ci 39662306a36Sopenharmony_civoid 
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev)
{
	xgpu_ai_send_access_requests(adev, IDH_RAS_POISON);
}

const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
	.req_init_data = xgpu_ai_request_init_data,
	.ras_poison_handler = xgpu_ai_ras_poison_handler,
};