Lines matching refs:adev (references to the amdgpu_device pointer in soc15.c; the leading numbers are source line numbers)

102 static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
105 address = adev->nbio.funcs->get_pcie_index_offset(adev);
106 data = adev->nbio.funcs->get_pcie_data_offset(adev);
108 return amdgpu_device_indirect_rreg(adev, address, data, reg);
111 static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
115 address = adev->nbio.funcs->get_pcie_index_offset(adev);
116 data = adev->nbio.funcs->get_pcie_data_offset(adev);
118 amdgpu_device_indirect_wreg(adev, address, data, reg, v);
121 static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
124 address = adev->nbio.funcs->get_pcie_index_offset(adev);
125 data = adev->nbio.funcs->get_pcie_data_offset(adev);
127 return amdgpu_device_indirect_rreg64(adev, address, data, reg);
130 static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
134 address = adev->nbio.funcs->get_pcie_index_offset(adev);
135 data = adev->nbio.funcs->get_pcie_data_offset(adev);
137 amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
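The four PCIe accessors above share one mechanism: the NBIO block exposes an index/data register pair (located via get_pcie_index_offset()/get_pcie_data_offset()), and the amdgpu_device_indirect_* helpers tunnel every access through that pair under adev->pcie_idx_lock. A minimal sketch of the read path, modeled on amdgpu_device_indirect_rreg() and assuming the usual amdgpu RREG32/WREG32 MMIO macros:

```c
/* Sketch of the index/data indirect read; simplified, not the
 * verbatim amdgpu_device_indirect_rreg(). */
static u32 indirect_rreg_sketch(struct amdgpu_device *adev,
				u32 pcie_index, u32 pcie_data, u32 reg_addr)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(pcie_index, reg_addr);	/* select the target register */
	(void)RREG32(pcie_index);	/* posting read orders the select */
	r = RREG32(pcie_data);		/* value comes back via the data port */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}
```

The 64-bit variants at lines 121-137 run the same sequence twice, once per 32-bit half.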
140 static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
148 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
151 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
155 static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
162 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
165 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
168 static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
176 spin_lock_irqsave(&adev->didt_idx_lock, flags);
179 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
183 static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
190 spin_lock_irqsave(&adev->didt_idx_lock, flags);
193 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
196 static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
201 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
204 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
208 static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
212 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
215 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
218 static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
223 spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
226 spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
230 static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
234 spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
237 spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
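The UVD context, DIDT, GC CAC, and SE CAC accessors (lines 140-237) use the same index/data scheme, but each hardware block has its own index register and its own spinlock (uvd_ctx_idx_lock, didt_idx_lock, gc_cac_idx_lock, se_cac_idx_lock), so accesses to different blocks never serialize against each other. A sketch of the write side, modeled on soc15_didt_wreg(); mmDIDT_IND_INDEX/mmDIDT_IND_DATA name that block's index/data pair:

```c
/* Sketch of a per-block indexed write, modeled on soc15_didt_wreg(). */
static void didt_wreg_sketch(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, reg);	/* select the DIDT-internal register */
	WREG32(mmDIDT_IND_DATA, v);	/* write lands through the data port */
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
```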
240 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
242 return adev->nbio.funcs->get_memsize(adev);
245 static u32 soc15_get_xclk(struct amdgpu_device *adev)
247 u32 reference_clock = adev->clock.spll.reference_freq;
249 if (adev->asic_type == CHIP_RENOIR)
251 if (adev->asic_type == CHIP_RAVEN)
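soc15_get_xclk() reports the reference (crystal) clock in 10 kHz units with per-ASIC quirks; the return statements themselves do not mention adev, so they are elided above. A sketch of the whole function, with the Renoir and Raven values recalled from upstream soc15.c of this vintage (treat those two constants as an assumption, not quoted source):

```c
static u32 get_xclk_sketch(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;

	if (adev->asic_type == CHIP_RENOIR)
		return 10000;			/* fixed 100 MHz in 10 kHz units (assumed) */
	if (adev->asic_type == CHIP_RAVEN)
		return reference_clock / 4;	/* Raven's SPLL value is 4x xclk (assumed) */

	return reference_clock;
}
```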
258 void soc15_grbm_select(struct amdgpu_device *adev,
270 static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
275 static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
281 static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
294 if (adev->flags & AMD_IS_APU)
300 switch (adev->asic_type) {
344 static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
349 mutex_lock(&adev->grbm_idx_mutex);
351 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
356 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
357 mutex_unlock(&adev->grbm_idx_mutex);
361 static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
366 return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
369 return adev->gfx.config.gb_addr_config;
371 return adev->gfx.config.db_debug2;
376 static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
385 if (!adev->reg_offset[en->hwip][en->inst])
387 else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
391 *value = soc15_get_register_value(adev,
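GFX registers may be banked per shader engine (SE) and shader array (SH). soc15_read_indexed_register() therefore serializes on grbm_idx_mutex, points the GRBM index at the requested instance, reads, and restores broadcast mode (0xffffffff means "all instances"). A sketch of that pattern:

```c
/* Sketch of a GRBM-indexed read, modeled on
 * soc15_read_indexed_register(). */
static u32 indexed_read_sketch(struct amdgpu_device *adev,
			       u32 se_num, u32 sh_num, u32 reg_offset)
{
	u32 val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);	/* read hits the selected SE/SH bank */

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		/* restore broadcast so later accesses reach all instances */
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	return val;
}
```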
403 * @adev: amdgpu_device pointer
411 void soc15_program_register_sequence(struct amdgpu_device *adev,
421 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
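Line 421 shows the address computation at the heart of soc15_program_register_sequence(): the absolute register offset is the per-HWIP/instance/segment base from adev->reg_offset plus the entry's relative offset. The apply step is a masked read-modify-write. A sketch of one loop iteration, assuming the soc15_reg_golden fields (hwip, instance, segment, reg, and_mask, or_mask):

```c
/* Sketch of one iteration over the golden-register table;
 * entry points at the current struct soc15_reg_golden. */
u32 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment]
	  + entry->reg;
u32 tmp;

if (entry->and_mask == 0xffffffff) {
	tmp = entry->or_mask;			/* whole-word overwrite */
} else {
	tmp = RREG32(reg);
	tmp &= ~entry->and_mask;		/* clear the masked field */
	tmp |= entry->or_mask & entry->and_mask; /* install the new value */
}
WREG32(reg, tmp);
```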
443 static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
448 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
450 dev_info(adev->dev, "GPU mode1 reset\n");
453 pci_clear_master(adev->pdev);
455 amdgpu_device_cache_pci_state(adev->pdev);
457 ret = psp_gpu_reset(adev);
459 dev_err(adev->dev, "GPU mode1 reset failed\n");
461 amdgpu_device_load_pci_state(adev->pdev);
464 for (i = 0; i < adev->usec_timeout; i++) {
465 u32 memsize = adev->nbio.funcs->get_memsize(adev);
472 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
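soc15_asic_mode1_reset() (lines 443-472) hands the actual reset to the PSP firmware and brackets it with PCI state save/restore, then polls the NBIO memsize register, which reads as 0xffffffff while the ASIC is still in reset. A sketch of the flow with error handling trimmed:

```c
/* Sketch of the mode1 (PSP) reset flow; error paths trimmed. */
static int mode1_reset_sketch(struct amdgpu_device *adev)
{
	int ret, i;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	pci_clear_master(adev->pdev);		/* stop bus-master DMA */
	amdgpu_device_cache_pci_state(adev->pdev);

	ret = psp_gpu_reset(adev);		/* PSP firmware performs the reset */
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for the memory controller to come back */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
	return ret;
}
```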
477 static int soc15_asic_baco_reset(struct amdgpu_device *adev)
479 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
484 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
486 ret = amdgpu_dpm_baco_reset(adev);
492 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
498 soc15_asic_reset_method(struct amdgpu_device *adev)
501 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
509 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
512 switch (adev->asic_type) {
519 baco_reset = amdgpu_dpm_is_baco_supported(adev);
522 if (adev->psp.sos_fw_version >= 0x80067)
523 baco_reset = amdgpu_dpm_is_baco_supported(adev);
529 if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
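soc15_asic_reset_method() chooses between MODE1 (full chip reset via PSP), MODE2 (lighter APU reset), and BACO ("bus active, chip off"). Condensing the fragments above: APUs take MODE2; Vega20 allows BACO only with PSP sos firmware >= 0x80067, and line 529 vetoes BACO when RAS is active on an old PMFW; everything else falls back to MODE1. A sketch of the AUTO path (module-parameter override handling trimmed):

```c
/* Condensed sketch of soc15_asic_reset_method(), AUTO path only. */
bool baco_reset = false;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

switch (adev->asic_type) {
case CHIP_RAVEN:
case CHIP_RENOIR:
	return AMD_RESET_METHOD_MODE2;		/* APUs: lightweight reset */
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_ARCTURUS:
	baco_reset = amdgpu_dpm_is_baco_supported(adev);
	break;
case CHIP_VEGA20:
	if (adev->psp.sos_fw_version >= 0x80067)
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
	/* RAS with an old PMFW cannot survive BACO */
	if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
		baco_reset = false;
	break;
default:
	break;
}
return baco_reset ? AMD_RESET_METHOD_BACO : AMD_RESET_METHOD_MODE1;
```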
542 static int soc15_asic_reset(struct amdgpu_device *adev)
545 if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
546 !(adev->apu_flags & AMD_APU_IS_RAVEN2))
549 switch (soc15_asic_reset_method(adev)) {
551 dev_info(adev->dev, "BACO reset\n");
552 return soc15_asic_baco_reset(adev);
554 dev_info(adev->dev, "MODE2 reset\n");
555 return amdgpu_dpm_mode2_reset(adev);
557 dev_info(adev->dev, "MODE1 reset\n");
558 return soc15_asic_mode1_reset(adev);
562 static bool soc15_supports_baco(struct amdgpu_device *adev)
564 switch (adev->asic_type) {
568 return amdgpu_dpm_is_baco_supported(adev);
570 if (adev->psp.sos_fw_version >= 0x80067)
571 return amdgpu_dpm_is_baco_supported(adev);
578 /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
584 static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
588 r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
592 r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
597 static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
604 static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
606 if (pci_is_root_bus(adev->pdev->bus))
612 if (adev->flags & AMD_IS_APU)
615 if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
622 static void soc15_program_aspm(struct amdgpu_device *adev)
631 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
634 adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
635 adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
647 static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
649 return adev->nbio.funcs->get_rev_id(adev);
652 static void soc15_reg_base_init(struct amdgpu_device *adev)
657 switch (adev->asic_type) {
661 vega10_reg_base_init(adev);
667 r = amdgpu_discovery_reg_base_init(adev);
673 vega10_reg_base_init(adev);
676 vega20_reg_base_init(adev);
679 arct_reg_base_init(adev);
682 DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
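soc15_reg_base_init() populates adev->reg_offset, the per-HWIP/per-instance/per-segment base table that all SOC15 register addressing builds on; note the Renoir branch first tries the IP-discovery table (line 667) and falls back to the static vega10 bases. Consumers normally go through SOC15_REG_OFFSET(), which in soc15_common.h expands approximately to:

```c
/* Approximate expansion of SOC15_REG_OFFSET() from soc15_common.h. */
#define SOC15_REG_OFFSET(ip, inst, reg) \
	(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
```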
687 void soc15_set_virt_ops(struct amdgpu_device *adev)
689 adev->virt.ops = &xgpu_ai_virt_ops;
694 soc15_reg_base_init(adev);
697 int soc15_set_ip_blocks(struct amdgpu_device *adev)
700 if (!amdgpu_sriov_vf(adev))
701 soc15_reg_base_init(adev);
703 if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
704 adev->gmc.xgmi.supported = true;
706 if (adev->flags & AMD_IS_APU) {
707 adev->nbio.funcs = &nbio_v7_0_funcs;
708 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
709 } else if (adev->asic_type == CHIP_VEGA20 ||
710 adev->asic_type == CHIP_ARCTURUS) {
711 adev->nbio.funcs = &nbio_v7_4_funcs;
712 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
714 adev->nbio.funcs = &nbio_v6_1_funcs;
715 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
718 if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
719 adev->df.funcs = &df_v3_6_funcs;
721 adev->df.funcs = &df_v1_7_funcs;
723 adev->rev_id = soc15_get_rev_id(adev);
725 switch (adev->asic_type) {
729 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
730 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
733 if (amdgpu_sriov_vf(adev)) {
734 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
735 if (adev->asic_type == CHIP_VEGA20)
736 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
738 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
740 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
742 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
743 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
744 if (adev->asic_type == CHIP_VEGA20)
745 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
747 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
750 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
751 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
752 if (is_support_sw_smu(adev)) {
753 if (!amdgpu_sriov_vf(adev))
754 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
756 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
758 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
759 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
761 else if (amdgpu_device_has_dc_support(adev))
762 amdgpu_device_ip_block_add(adev, &dm_ip_block);
764 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
765 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
766 amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
770 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
771 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
772 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
773 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
774 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
775 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
776 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
777 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
778 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
779 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
781 else if (amdgpu_device_has_dc_support(adev))
782 amdgpu_device_ip_block_add(adev, &dm_ip_block);
784 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
787 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
788 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
790 if (amdgpu_sriov_vf(adev)) {
791 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
792 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
793 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
795 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
796 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
797 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
800 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
801 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
802 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
803 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
804 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
806 if (amdgpu_sriov_vf(adev)) {
807 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
808 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
810 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
812 if (!amdgpu_sriov_vf(adev))
813 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
816 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
817 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
818 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
819 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
820 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
821 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
822 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
823 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
824 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
825 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
827 else if (amdgpu_device_has_dc_support(adev))
828 amdgpu_device_ip_block_add(adev, &dm_ip_block);
830 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
831 amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
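Each branch of soc15_set_ip_blocks() queues the ASIC's IP blocks in bring-up order: common, then GMC, then IH and PSP, then GFX/SDMA, SMU, display, and finally the multimedia engines; on SR-IOV virtual functions the PSP block is queued before the IH block so firmware loading can happen first. Registration itself is just an append, roughly (modeled on amdgpu_device_ip_block_add()):

```c
/* Sketch: registration appends to adev->ip_blocks; the later
 * sw_init/hw_init passes walk the array in this same order. */
int ip_block_add_sketch(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *version)
{
	if (!version)
		return -EINVAL;

	adev->ip_blocks[adev->num_ip_blocks++].version = version;
	return 0;
}
```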
840 static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
842 adev->nbio.funcs->hdp_flush(adev, ring);
845 static void soc15_invalidate_hdp(struct amdgpu_device *adev,
855 static bool soc15_need_full_reset(struct amdgpu_device *adev)
861 static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
863 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
869 static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
879 if (adev->flags & AMD_IS_APU)
916 static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
926 if (adev->flags & AMD_IS_APU)
965 static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
972 if (!amdgpu_passthrough(adev))
975 if (adev->flags & AMD_IS_APU)
988 static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
1000 static void soc15_pre_asic_init(struct amdgpu_device *adev)
1002 gmc_v9_0_restore_registers(adev);
1055 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1057 adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
1058 adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
1059 adev->smc_rreg = NULL;
1060 adev->smc_wreg = NULL;
1061 adev->pcie_rreg = &soc15_pcie_rreg;
1062 adev->pcie_wreg = &soc15_pcie_wreg;
1063 adev->pcie_rreg64 = &soc15_pcie_rreg64;
1064 adev->pcie_wreg64 = &soc15_pcie_wreg64;
1065 adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
1066 adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
1067 adev->didt_rreg = &soc15_didt_rreg;
1068 adev->didt_wreg = &soc15_didt_wreg;
1069 adev->gc_cac_rreg = &soc15_gc_cac_rreg;
1070 adev->gc_cac_wreg = &soc15_gc_cac_wreg;
1071 adev->se_cac_rreg = &soc15_se_cac_rreg;
1072 adev->se_cac_wreg = &soc15_se_cac_wreg;
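These early-init assignments (lines 1059-1072) wire the SOC15 accessors into the per-device callback slots; the generic register macros dispatch through them, so RREG32_PCIE() on a SOC15 part ends up in soc15_pcie_rreg(). In amdgpu.h the macros expand approximately to:

```c
/* Approximate definitions from amdgpu.h, shown for orientation. */
#define RREG32_PCIE(reg)	adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v)	adev->pcie_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg)	adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v)	adev->didt_wreg(adev, (reg), (v))
```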
1075 adev->external_rev_id = 0xFF;
1076 switch (adev->asic_type) {
1078 adev->asic_funcs = &soc15_asic_funcs;
1079 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1098 adev->pg_flags = 0;
1099 adev->external_rev_id = 0x1;
1102 adev->asic_funcs = &soc15_asic_funcs;
1103 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1121 adev->pg_flags = 0;
1122 adev->external_rev_id = adev->rev_id + 0x14;
1125 adev->asic_funcs = &vega20_asic_funcs;
1126 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1144 adev->pg_flags = 0;
1145 adev->external_rev_id = adev->rev_id + 0x28;
1148 adev->asic_funcs = &soc15_asic_funcs;
1149 if (adev->pdev->device == 0x15dd)
1150 adev->apu_flags |= AMD_APU_IS_RAVEN;
1151 if (adev->pdev->device == 0x15d8)
1152 adev->apu_flags |= AMD_APU_IS_PICASSO;
1153 if (adev->rev_id >= 0x8)
1154 adev->apu_flags |= AMD_APU_IS_RAVEN2;
1156 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1157 adev->external_rev_id = adev->rev_id + 0x79;
1158 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1159 adev->external_rev_id = adev->rev_id + 0x41;
1160 else if (adev->rev_id == 1)
1161 adev->external_rev_id = adev->rev_id + 0x20;
1163 adev->external_rev_id = adev->rev_id + 0x01;
1165 if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
1166 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1182 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1183 } else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
1184 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1202 adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1205 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1225 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1229 adev->asic_funcs = &vega20_asic_funcs;
1230 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1244 adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
1245 adev->external_rev_id = adev->rev_id + 0x32;
1248 adev->asic_funcs = &soc15_asic_funcs;
1249 if ((adev->pdev->device == 0x1636) ||
1250 (adev->pdev->device == 0x164c))
1251 adev->apu_flags |= AMD_APU_IS_RENOIR;
1253 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1255 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1256 adev->external_rev_id = adev->rev_id + 0x91;
1258 adev->external_rev_id = adev->rev_id + 0xa1;
1259 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1279 adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1289 if (amdgpu_sriov_vf(adev)) {
1290 amdgpu_virt_init_setting(adev);
1291 xgpu_ai_mailbox_set_irq_funcs(adev);
1299 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1302 if (amdgpu_sriov_vf(adev))
1303 xgpu_ai_mailbox_get_irq(adev);
1305 if (adev->asic_funcs &&
1306 adev->asic_funcs->reset_hdp_ras_error_count)
1307 adev->asic_funcs->reset_hdp_ras_error_count(adev);
1309 if (adev->nbio.funcs->ras_late_init)
1310 r = adev->nbio.funcs->ras_late_init(adev);
1317 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1319 if (amdgpu_sriov_vf(adev))
1320 xgpu_ai_mailbox_add_irq_id(adev);
1322 adev->df.funcs->sw_init(adev);
1329 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1331 amdgpu_nbio_ras_fini(adev);
1332 adev->df.funcs->sw_fini(adev);
1336 static void soc15_doorbell_range_init(struct amdgpu_device *adev)
1342 if (!amdgpu_sriov_vf(adev)) {
1343 for (i = 0; i < adev->sdma.num_instances; i++) {
1344 ring = &adev->sdma.instance[i].ring;
1345 adev->nbio.funcs->sdma_doorbell_range(adev, i,
1347 adev->doorbell_index.sdma_doorbell_range);
1350 adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
1351 adev->irq.ih.doorbell_index);
1357 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1360 soc15_pcie_gen3_enable(adev);
1362 soc15_program_aspm(adev);
1364 adev->nbio.funcs->init_registers(adev);
1369 if (adev->nbio.funcs->remap_hdp_registers)
1370 adev->nbio.funcs->remap_hdp_registers(adev);
1373 soc15_enable_doorbell_aperture(adev, true);
1379 soc15_doorbell_range_init(adev);
1386 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1389 soc15_enable_doorbell_aperture(adev, false);
1390 if (amdgpu_sriov_vf(adev))
1391 xgpu_ai_mailbox_put_irq(adev);
1393 if (adev->nbio.ras_if &&
1394 amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
1395 if (adev->nbio.funcs->init_ras_controller_interrupt)
1396 amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
1397 if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
1398 amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
1406 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1408 return soc15_common_hw_fini(adev);
1413 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1415 return soc15_common_hw_init(adev);
1433 static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
1437 if (adev->asic_type == CHIP_VEGA20 ||
1438 adev->asic_type == CHIP_ARCTURUS ||
1439 adev->asic_type == CHIP_RENOIR) {
1442 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1458 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1468 static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
1474 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
1497 static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
1503 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1512 static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1519 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
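The soc15_update_*() clock-gating helpers (lines 1433-1519) all share one shape: read the control register, set or clear the feature mask depending on the enable flag and the matching AMD_CG_SUPPORT_* bit in adev->cg_flags, and write back only when the value actually changed. A generic distillation (the helper and its parameters are illustrative, not from soc15.c):

```c
/* Illustrative distillation of the soc15_update_*() pattern. */
static void update_cg_feature_sketch(struct amdgpu_device *adev,
				     u32 reg, u32 mask,
				     u32 cg_flag, bool enable)
{
	u32 def, data;

	def = data = RREG32(reg);

	if (enable && (adev->cg_flags & cg_flag))
		data |= mask;		/* turn the gating feature on */
	else
		data &= ~mask;		/* force it off */

	if (def != data)		/* skip the write when nothing changed */
		WREG32(reg, data);
}
```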
1533 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1535 if (amdgpu_sriov_vf(adev))
1538 switch (adev->asic_type) {
1542 adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1544 adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1546 soc15_update_hdp_light_sleep(adev,
1548 soc15_update_drm_clock_gating(adev,
1550 soc15_update_drm_light_sleep(adev,
1552 soc15_update_rom_medium_grain_clock_gating(adev,
1554 adev->df.funcs->update_medium_grain_clock_gating(adev,
1559 adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1561 adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1563 soc15_update_hdp_light_sleep(adev,
1565 soc15_update_drm_clock_gating(adev,
1567 soc15_update_drm_light_sleep(adev,
1569 soc15_update_rom_medium_grain_clock_gating(adev,
1573 soc15_update_hdp_light_sleep(adev,
1584 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1587 if (amdgpu_sriov_vf(adev))
1590 adev->nbio.funcs->get_clockgating_state(adev, flags);
1612 adev->df.funcs->get_clockgating_state(adev, flags);