Lines matching refs: adev (each entry is the source line number followed by the matching line)
174 static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
177 if (adev->ip_versions[VCE_HWIP][0]) {
178 switch (adev->ip_versions[VCE_HWIP][0]) {
190 switch (adev->ip_versions[UVD_HWIP][0]) {
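
Note: soc15_query_video_codecs above is the standard SOC15 dispatch idiom, where the IP version discovered at init (adev->ip_versions[HWIP][instance]) selects a per-ASIC table. A minimal sketch of that shape, assuming the vega_video_codecs_encode/decode tables and VCE 4.x case labels used upstream (check them against your tree):

static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
                                    const struct amdgpu_video_codecs **codecs)
{
        if (adev->ip_versions[VCE_HWIP][0]) {
                /* VCE-based parts (Vega) */
                switch (adev->ip_versions[VCE_HWIP][0]) {
                case IP_VERSION(4, 0, 0):
                case IP_VERSION(4, 1, 0):
                        *codecs = encode ? &vega_video_codecs_encode
                                         : &vega_video_codecs_decode;
                        return 0;
                default:
                        return -EINVAL;
                }
        }
        /* VCN-based parts dispatch on UVD_HWIP in the same style */
        switch (adev->ip_versions[UVD_HWIP][0]) {
        default:
                return -EINVAL;
        }
}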
218 static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
226 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
229 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
233 static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
240 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
243 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
246 static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
254 spin_lock_irqsave(&adev->didt_idx_lock, flags);
257 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
261 static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
268 spin_lock_irqsave(&adev->didt_idx_lock, flags);
271 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
274 static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
279 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
282 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
286 static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
290 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
293 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
296 static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
301 spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
304 spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
308 static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
312 spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
315 spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
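
Note: the six rreg/wreg helper pairs above (uvd_ctx, didt, gc_cac, se_cac) all implement the same indirect-access pattern: write a register index into an INDEX register, then access the paired DATA register, with a dedicated spinlock taken irqsave so the two MMIO accesses stay atomic against other CPUs and interrupt context. A representative sketch of the read side, assuming the UVD_CTX index/data register names (the real helper additionally shifts/masks the index field per the register spec; the other five differ only in lock and register pair):

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;

        address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
        data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
        WREG32(address, reg);   /* select the indirect register */
        r = RREG32(data);       /* read it back through the DATA port */
        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
        return r;
}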
318 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
320 return adev->nbio.funcs->get_memsize(adev);
323 static u32 soc15_get_xclk(struct amdgpu_device *adev)
325 u32 reference_clock = adev->clock.spll.reference_freq;
327 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||
328 adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) ||
329 adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 6))
331 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
332 adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
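
Note: soc15_get_xclk returns the SPLL reference frequency with per-MP1-version fixups. Reconstructed to the upstream shape; the fixed 10000 (100 MHz in 10 kHz units) and the divide-by-four for the MP1 v10.0.x APUs are taken from the upstream file and worth re-checking in your tree:

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
        u32 reference_clock = adev->clock.spll.reference_freq;

        if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||
            adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) ||
            adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 6))
                return 10000;   /* fixed 100 MHz, in 10 kHz units */
        if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
            adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
                return reference_clock / 4;

        return reference_clock;
}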
339 void soc15_grbm_select(struct amdgpu_device *adev,
351 static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
380 static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
385 mutex_lock(&adev->grbm_idx_mutex);
387 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
392 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
393 mutex_unlock(&adev->grbm_idx_mutex);
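
Note: soc15_read_indexed_register is the usual GRBM-indexed read: take grbm_idx_mutex, steer GRBM to one SE/SH, read, then restore broadcast (0xffffffff) before unlocking so later accesses see all engines again. Reconstructed from the lines above:

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
                                            u32 sh_num, u32 reg_offset)
{
        uint32_t val;

        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);

        val = RREG32(reg_offset);

        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
}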
397 static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
402 return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
405 return adev->gfx.config.gb_addr_config;
407 return adev->gfx.config.db_debug2;
412 static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
421 if (!adev->reg_offset[en->hwip][en->inst])
423 else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
427 *value = soc15_get_register_value(adev,
439 * @adev: amdgpu_device pointer
447 void soc15_program_register_sequence(struct amdgpu_device *adev,
457 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
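
Note: the adev->reg_offset[hwip][instance][segment] + reg expression in soc15_read_register and soc15_program_register_sequence is SOC15 register addressing: per-IP segment bases are filled in at init (see soc15_reg_base_init below) and every register access is base plus per-segment offset. The SOC15_REG_OFFSET() macro in soc15_common.h expands to exactly this:

#define SOC15_REG_OFFSET(ip, inst, reg) \
        (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)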
482 static int soc15_asic_baco_reset(struct amdgpu_device *adev)
484 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
488 if (ras && adev->ras_enabled)
489 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
491 ret = amdgpu_dpm_baco_reset(adev);
496 if (ras && adev->ras_enabled)
497 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
503 soc15_asic_reset_method(struct amdgpu_device *adev)
507 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
509 if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)
523 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
526 switch (adev->ip_versions[MP1_HWIP][0]) {
534 if (adev->asic_type == CHIP_VEGA20) {
535 if (adev->psp.sos.fw_version >= 0x80067)
536 baco_reset = amdgpu_dpm_is_baco_supported(adev);
541 if (ras && adev->ras_enabled &&
542 adev->pm.fw_version <= 0x283400)
545 baco_reset = amdgpu_dpm_is_baco_supported(adev);
563 else if (!(adev->flags & AMD_IS_APU))
577 static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
587 if (adev->flags & AMD_IS_APU && adev->in_s3 &&
588 !adev->suspend_complete &&
595 static int soc15_asic_reset(struct amdgpu_device *adev)
602 if (((adev->apu_flags & AMD_APU_IS_RAVEN) ||
603 (adev->apu_flags & AMD_APU_IS_RAVEN2)) &&
604 !soc15_need_reset_on_resume(adev))
607 switch (soc15_asic_reset_method(adev)) {
609 dev_info(adev->dev, "PCI reset\n");
610 return amdgpu_device_pci_reset(adev);
612 dev_info(adev->dev, "BACO reset\n");
613 return soc15_asic_baco_reset(adev);
615 dev_info(adev->dev, "MODE2 reset\n");
616 return amdgpu_dpm_mode2_reset(adev);
618 dev_info(adev->dev, "MODE1 reset\n");
619 return amdgpu_device_mode1_reset(adev);
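
Note: soc15_asic_reset dispatches on soc15_asic_reset_method(); the dev_info strings above identify the four arms as PCI, BACO, MODE2, and MODE1 (the default). Reconstructed dispatch, using the AMD_RESET_METHOD_* values from amd_shared.h:

        /* tail of soc15_asic_reset(), reconstructed */
        switch (soc15_asic_reset_method(adev)) {
        case AMD_RESET_METHOD_PCI:
                dev_info(adev->dev, "PCI reset\n");
                return amdgpu_device_pci_reset(adev);
        case AMD_RESET_METHOD_BACO:
                dev_info(adev->dev, "BACO reset\n");
                return soc15_asic_baco_reset(adev);
        case AMD_RESET_METHOD_MODE2:
                dev_info(adev->dev, "MODE2 reset\n");
                return amdgpu_dpm_mode2_reset(adev);
        default:
                dev_info(adev->dev, "MODE1 reset\n");
                return amdgpu_device_mode1_reset(adev);
        }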
623 static bool soc15_supports_baco(struct amdgpu_device *adev)
625 switch (adev->ip_versions[MP1_HWIP][0]) {
628 if (adev->asic_type == CHIP_VEGA20) {
629 if (adev->psp.sos.fw_version >= 0x80067)
630 return amdgpu_dpm_is_baco_supported(adev);
633 return amdgpu_dpm_is_baco_supported(adev);
641 /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
647 static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
651 r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
655 r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
660 static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
667 static void soc15_program_aspm(struct amdgpu_device *adev)
669 if (!amdgpu_device_should_use_aspm(adev))
672 if (!(adev->flags & AMD_IS_APU) &&
673 (adev->nbio.funcs->program_aspm))
674 adev->nbio.funcs->program_aspm(adev);
686 static void soc15_reg_base_init(struct amdgpu_device *adev)
689 switch (adev->asic_type) {
694 vega10_reg_base_init(adev);
697 vega20_reg_base_init(adev);
700 arct_reg_base_init(adev);
703 aldebaran_reg_base_init(adev);
706 DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
711 void soc15_set_virt_ops(struct amdgpu_device *adev)
713 adev->virt.ops = &xgpu_ai_virt_ops;
718 soc15_reg_base_init(adev);
721 static bool soc15_need_full_reset(struct amdgpu_device *adev)
727 static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
737 if (adev->flags & AMD_IS_APU)
774 static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
784 if (adev->flags & AMD_IS_APU)
823 static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
828 if (adev->asic_type == CHIP_RENOIR)
834 if (!amdgpu_passthrough(adev))
837 if (adev->flags & AMD_IS_APU)
850 static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
862 static void soc15_pre_asic_init(struct amdgpu_device *adev)
864 gmc_v9_0_restore_registers(adev);
934 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
936 if (!amdgpu_sriov_vf(adev)) {
937 adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
938 adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
940 adev->smc_rreg = NULL;
941 adev->smc_wreg = NULL;
942 adev->pcie_rreg = &amdgpu_device_indirect_rreg;
943 adev->pcie_wreg = &amdgpu_device_indirect_wreg;
944 adev->pcie_rreg_ext = &amdgpu_device_indirect_rreg_ext;
945 adev->pcie_wreg_ext = &amdgpu_device_indirect_wreg_ext;
946 adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
947 adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
948 adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
949 adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
950 adev->didt_rreg = &soc15_didt_rreg;
951 adev->didt_wreg = &soc15_didt_wreg;
952 adev->gc_cac_rreg = &soc15_gc_cac_rreg;
953 adev->gc_cac_wreg = &soc15_gc_cac_wreg;
954 adev->se_cac_rreg = &soc15_se_cac_rreg;
955 adev->se_cac_wreg = &soc15_se_cac_wreg;
957 adev->rev_id = amdgpu_device_get_rev_id(adev);
958 adev->external_rev_id = 0xFF;
962 switch (adev->ip_versions[GC_HWIP][0]) {
964 adev->asic_funcs = &soc15_asic_funcs;
965 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
984 adev->pg_flags = 0;
985 adev->external_rev_id = 0x1;
988 adev->asic_funcs = &soc15_asic_funcs;
989 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1007 adev->pg_flags = 0;
1008 adev->external_rev_id = adev->rev_id + 0x14;
1011 adev->asic_funcs = &vega20_asic_funcs;
1012 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1030 adev->pg_flags = 0;
1031 adev->external_rev_id = adev->rev_id + 0x28;
1035 adev->asic_funcs = &soc15_asic_funcs;
1037 if (adev->rev_id >= 0x8)
1038 adev->apu_flags |= AMD_APU_IS_RAVEN2;
1040 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1041 adev->external_rev_id = adev->rev_id + 0x79;
1042 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1043 adev->external_rev_id = adev->rev_id + 0x41;
1044 else if (adev->rev_id == 1)
1045 adev->external_rev_id = adev->rev_id + 0x20;
1047 adev->external_rev_id = adev->rev_id + 0x01;
1049 if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
1050 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1065 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1066 } else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
1067 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1085 adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1088 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1107 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1111 adev->asic_funcs = &vega20_asic_funcs;
1112 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1126 adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
1127 adev->external_rev_id = adev->rev_id + 0x32;
1130 adev->asic_funcs = &soc15_asic_funcs;
1132 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1133 adev->external_rev_id = adev->rev_id + 0x91;
1135 adev->external_rev_id = adev->rev_id + 0xa1;
1136 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1155 adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1161 adev->asic_funcs = &vega20_asic_funcs;
1162 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1170 adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
1171 adev->external_rev_id = adev->rev_id + 0x3c;
1174 adev->asic_funcs = &aqua_vanjaram_asic_funcs;
1175 adev->cg_flags =
1181 adev->pg_flags =
1185 adev->external_rev_id = adev->rev_id + 0x46;
1187 if (!amdgpu_sriov_vf(adev)) {
1188 adev->rmmio_remap.reg_offset = 0x1A000;
1189 adev->rmmio_remap.bus_addr = adev->rmmio_base + 0x1A000;
1197 if (amdgpu_sriov_vf(adev)) {
1198 amdgpu_virt_init_setting(adev);
1199 xgpu_ai_mailbox_set_irq_funcs(adev);
1207 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1209 if (amdgpu_sriov_vf(adev))
1210 xgpu_ai_mailbox_get_irq(adev);
1215 adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
1222 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1224 if (amdgpu_sriov_vf(adev))
1225 xgpu_ai_mailbox_add_irq_id(adev);
1227 if (adev->df.funcs &&
1228 adev->df.funcs->sw_init)
1229 adev->df.funcs->sw_init(adev);
1236 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1238 if (adev->df.funcs &&
1239 adev->df.funcs->sw_fini)
1240 adev->df.funcs->sw_fini(adev);
1244 static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
1249 if (!amdgpu_sriov_vf(adev)) {
1250 for (i = 0; i < adev->sdma.num_instances; i++) {
1251 adev->nbio.funcs->sdma_doorbell_range(adev, i,
1252 true, adev->doorbell_index.sdma_engine[i] << 1,
1253 adev->doorbell_index.sdma_doorbell_range);
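
Note: soc15_sdma_doorbell_range_init is short enough to reconstruct whole from the lines above: under SR-IOV the hypervisor programs the doorbell ranges, so the VF skips them; otherwise each SDMA instance gets its range via the NBIO callback. The << 1 converts the stored doorbell index into 32-bit doorbell units (each 64-bit doorbell occupies two 32-bit slots):

static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
{
        uint32_t i;

        /* SDMA doorbell range is programmed by the hypervisor under SR-IOV */
        if (!amdgpu_sriov_vf(adev)) {
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        adev->nbio.funcs->sdma_doorbell_range(adev, i,
                                true, adev->doorbell_index.sdma_engine[i] << 1,
                                adev->doorbell_index.sdma_doorbell_range);
                }
        }
}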
1260 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1263 soc15_program_aspm(adev);
1265 adev->nbio.funcs->init_registers(adev);
1270 if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
1271 adev->nbio.funcs->remap_hdp_registers(adev);
1274 adev->nbio.funcs->enable_doorbell_aperture(adev, true);
1282 soc15_sdma_doorbell_range_init(adev);
1289 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1296 adev->nbio.funcs->enable_doorbell_aperture(adev, false);
1297 adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
1299 if (amdgpu_sriov_vf(adev))
1300 xgpu_ai_mailbox_put_irq(adev);
1302 if (adev->nbio.ras_if &&
1303 amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
1304 if (adev->nbio.ras &&
1305 adev->nbio.ras->init_ras_controller_interrupt)
1306 amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
1307 if (adev->nbio.ras &&
1308 adev->nbio.ras->init_ras_err_event_athub_interrupt)
1309 amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
1317 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1319 return soc15_common_hw_fini(adev);
1324 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1326 if (soc15_need_reset_on_resume(adev)) {
1327 dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
1328 soc15_asic_reset(adev);
1330 return soc15_common_hw_init(adev);
1348 static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
1354 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
1377 static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
1383 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
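
Note: soc15_update_drm_clock_gating (and its _light_sleep sibling) is a read-modify-write of CGTT override bits that only touches hardware when the value actually changes. A hedged sketch; the real file clears/sets a list of raw override bits in mmMP0_MISC_CGTT_CTRL0, for which drm_mgcg_override below is an illustrative stand-in:

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
        /* illustrative stand-in for the raw override bits used upstream */
        const uint32_t drm_mgcg_override = 0x03000000;
        uint32_t def, data;

        def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
                data &= ~drm_mgcg_override;     /* let hardware gate the clocks */
        else
                data |= drm_mgcg_override;      /* force the clocks on */

        if (def != data)
                WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}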
1395 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1397 if (amdgpu_sriov_vf(adev))
1400 switch (adev->ip_versions[NBIO_HWIP][0]) {
1404 adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1406 adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1408 adev->hdp.funcs->update_clock_gating(adev,
1410 soc15_update_drm_clock_gating(adev,
1412 soc15_update_drm_light_sleep(adev,
1414 adev->smuio.funcs->update_rom_clock_gating(adev,
1416 adev->df.funcs->update_medium_grain_clock_gating(adev,
1422 adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1424 adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1426 adev->hdp.funcs->update_clock_gating(adev,
1428 soc15_update_drm_clock_gating(adev,
1430 soc15_update_drm_light_sleep(adev,
1435 adev->hdp.funcs->update_clock_gating(adev,
1446 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1449 if (amdgpu_sriov_vf(adev))
1452 if (adev->nbio.funcs && adev->nbio.funcs->get_clockgating_state)
1453 adev->nbio.funcs->get_clockgating_state(adev, flags);
1455 if (adev->hdp.funcs && adev->hdp.funcs->get_clock_gating_state)
1456 adev->hdp.funcs->get_clock_gating_state(adev, flags);
1458 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) {
1472 if (adev->smuio.funcs && adev->smuio.funcs->get_clock_gating_state)
1473 adev->smuio.funcs->get_clock_gating_state(adev, flags);
1475 if (adev->df.funcs && adev->df.funcs->get_clockgating_state)
1476 adev->df.funcs->get_clockgating_state(adev, flags);
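
Note: soc15_common_get_clockgating_state forwards a flags accumulator to each IP's callback; every callback ORs in the AMD_CG_* bits it observes enabled in hardware, so the caller ends up with the union. Sketch of the shape (u64 flags, as in current kernels):

static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev))
                *flags = 0;

        if (adev->nbio.funcs && adev->nbio.funcs->get_clockgating_state)
                adev->nbio.funcs->get_clockgating_state(adev, flags);

        /* hdp, smuio, and df are queried the same way; each ORs the
         * AMD_CG_* bits it finds enabled into *flags */
}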