Lines matching refs: adev (drivers/gpu/drm/amd/amdgpu/soc21.c)
150 static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
153 if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
156 switch (adev->ip_versions[UVD_HWIP][0]) {
160 if (amdgpu_sriov_vf(adev)) {
161 if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
162 !amdgpu_sriov_is_av1_support(adev)) {
174 if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)) {
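A minimal sketch of how these matched lines fit together in soc21_query_video_codecs. The per-instance codec table names below are placeholders, not the real identifiers from soc21.c:

    /* Sketch, not verbatim: codec table names are illustrative placeholders */
    static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
                                        const struct amdgpu_video_codecs **codecs)
    {
        /* every VCN instance harvested: nothing to report */
        if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
            return -EINVAL;

        switch (adev->ip_versions[UVD_HWIP][0]) {
        case IP_VERSION(4, 0, 0):
            if (amdgpu_sriov_vf(adev)) {
                if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
                    !amdgpu_sriov_is_av1_support(adev)) {
                    /* VCN0 harvested or AV1 not exposed to this VF:
                     * fall back to the reduced codec table */
                    *codecs = encode ? &sriov_codecs_encode_vcn1
                                     : &sriov_codecs_decode_vcn1;
                } else {
                    *codecs = encode ? &sriov_codecs_encode_vcn0
                                     : &sriov_codecs_decode_vcn0;
                }
            } else {
                if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
                    *codecs = encode ? &codecs_encode_vcn1 : &codecs_decode_vcn1;
                else
                    *codecs = encode ? &codecs_encode_vcn0 : &codecs_decode_vcn0;
            }
            return 0;
        default:
            return -EINVAL;
        }
    }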
192 static u32 soc21_didt_rreg(struct amdgpu_device *adev, u32 reg)
200 spin_lock_irqsave(&adev->didt_idx_lock, flags);
203 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
207 static void soc21_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
214 spin_lock_irqsave(&adev->didt_idx_lock, flags);
217 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
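Both DIDT accessors bracket an index/data register pair with adev->didt_idx_lock so the index write and the data access stay atomic. A sketch of the read side, assuming the GC DIDT index/data register names used elsewhere in the SOC15 family:

    static u32 soc21_didt_rreg(struct amdgpu_device *adev, u32 reg)
    {
        unsigned long flags, address, data;
        u32 r;

        /* register names assumed from the SOC15-family pattern */
        address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
        data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(address, reg);   /* select the indirect register */
        r = RREG32(data);       /* then read it back */
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);

        return r;
    }

soc21_didt_wreg mirrors this, writing v to the data register instead of reading it.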
220 static u32 soc21_get_config_memsize(struct amdgpu_device *adev)
222 return adev->nbio.funcs->get_memsize(adev);
225 static u32 soc21_get_xclk(struct amdgpu_device *adev)
227 return adev->clock.spll.reference_freq;
231 void soc21_grbm_select(struct amdgpu_device *adev,
243 static bool soc21_read_disabled_bios(struct amdgpu_device *adev)
271 static uint32_t soc21_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
276 mutex_lock(&adev->grbm_idx_mutex);
278 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
283 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
284 mutex_unlock(&adev->grbm_idx_mutex);
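Read in sequence, these lines show the standard GRBM-indexed read: take the index mutex, narrow the SE/SH selection, read, then restore broadcast mode. A sketch consistent with the fragment:

    static uint32_t soc21_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
                                                u32 sh_num, u32 reg_offset)
    {
        uint32_t val;

        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
            amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);

        val = RREG32(reg_offset);

        /* restore broadcast (all SEs / all SHs) before releasing the mutex */
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
            amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
    }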
288 static uint32_t soc21_get_register_value(struct amdgpu_device *adev,
293 return soc21_read_indexed_register(adev, se_num, sh_num, reg_offset);
295 if (reg_offset == SOC15_REG_OFFSET(GC, 0, regGB_ADDR_CONFIG) && adev->gfx.config.gb_addr_config)
296 return adev->gfx.config.gb_addr_config;
301 static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
310 if (!adev->reg_offset[en->hwip][en->inst])
312 else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
316 *value = soc21_get_register_value(adev,
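The offset comparison above implies a walk over an allow-list of user-readable registers. A sketch assuming a soc21_allowed_read_registers table in the soc15 style (the table name and entry type are assumptions):

    static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
                                   u32 sh_num, u32 reg_offset, u32 *value)
    {
        uint32_t i;
        struct soc15_allowed_register_entry *en;

        *value = 0;
        for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
            en = &soc21_allowed_read_registers[i];
            /* skip IP blocks that were never mapped on this ASIC */
            if (!adev->reg_offset[en->hwip][en->inst])
                continue;
            else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
                                    + en->reg_offset))
                continue;

            *value = soc21_get_register_value(adev, en->grbm_indexed,
                                              se_num, sh_num, reg_offset);
            return 0;
        }
        return -EINVAL;  /* not on the allow-list */
    }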
325 static int soc21_asic_mode1_reset(struct amdgpu_device *adev)
330 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
333 pci_clear_master(adev->pdev);
335 amdgpu_device_cache_pci_state(adev->pdev);
337 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
338 dev_info(adev->dev, "GPU smu mode1 reset\n");
339 ret = amdgpu_dpm_mode1_reset(adev);
341 dev_info(adev->dev, "GPU psp mode1 reset\n");
342 ret = psp_gpu_reset(adev);
346 dev_err(adev->dev, "GPU mode1 reset failed\n");
347 amdgpu_device_load_pci_state(adev->pdev);
350 for (i = 0; i < adev->usec_timeout; i++) {
351 u32 memsize = adev->nbio.funcs->get_memsize(adev);
358 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
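Together these lines trace the whole mode1 reset sequence. A sketch of the flow they imply:

    static int soc21_asic_mode1_reset(struct amdgpu_device *adev)
    {
        u32 i;
        int ret = 0;

        amdgpu_atombios_scratch_regs_engine_hung(adev, true);

        /* disable bus mastering and save PCI config before the reset */
        pci_clear_master(adev->pdev);
        amdgpu_device_cache_pci_state(adev->pdev);

        if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
            dev_info(adev->dev, "GPU smu mode1 reset\n");
            ret = amdgpu_dpm_mode1_reset(adev);
        } else {
            dev_info(adev->dev, "GPU psp mode1 reset\n");
            ret = psp_gpu_reset(adev);
        }

        if (ret)
            dev_err(adev->dev, "GPU mode1 reset failed\n");
        amdgpu_device_load_pci_state(adev->pdev);

        /* poll memsize until the ASIC comes back out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
            u32 memsize = adev->nbio.funcs->get_memsize(adev);

            if (memsize != 0xffffffff)
                break;
            udelay(1);
        }

        amdgpu_atombios_scratch_regs_engine_hung(adev, false);

        return ret;
    }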
365 soc21_asic_reset_method(struct amdgpu_device *adev)
373 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
376 switch (adev->ip_versions[MP1_HWIP][0]) {
385 if (amdgpu_dpm_is_baco_supported(adev))
392 static int soc21_asic_reset(struct amdgpu_device *adev)
396 switch (soc21_asic_reset_method(adev)) {
398 dev_info(adev->dev, "PCI reset\n");
399 ret = amdgpu_device_pci_reset(adev);
402 dev_info(adev->dev, "BACO reset\n");
403 ret = amdgpu_dpm_baco_reset(adev);
406 dev_info(adev->dev, "MODE2 reset\n");
407 ret = amdgpu_dpm_mode2_reset(adev);
410 dev_info(adev->dev, "MODE1 reset\n");
411 ret = amdgpu_device_mode1_reset(adev);
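The four dev_info strings map onto an AMD_RESET_METHOD_* dispatch, with mode1 as the fallback. A sketch:

    static int soc21_asic_reset(struct amdgpu_device *adev)
    {
        int ret = 0;

        switch (soc21_asic_reset_method(adev)) {
        case AMD_RESET_METHOD_PCI:
            dev_info(adev->dev, "PCI reset\n");
            ret = amdgpu_device_pci_reset(adev);
            break;
        case AMD_RESET_METHOD_BACO:
            dev_info(adev->dev, "BACO reset\n");
            ret = amdgpu_dpm_baco_reset(adev);
            break;
        case AMD_RESET_METHOD_MODE2:
            dev_info(adev->dev, "MODE2 reset\n");
            ret = amdgpu_dpm_mode2_reset(adev);
            break;
        default:
            dev_info(adev->dev, "MODE1 reset\n");
            ret = amdgpu_device_mode1_reset(adev);
            break;
        }

        return ret;
    }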
418 static int soc21_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
424 static int soc21_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
430 static void soc21_program_aspm(struct amdgpu_device *adev)
432 if (!amdgpu_device_should_use_aspm(adev))
435 if (!(adev->flags & AMD_IS_APU) &&
436 (adev->nbio.funcs->program_aspm))
437 adev->nbio.funcs->program_aspm(adev);
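The fragment above is nearly the whole function; assembled, with the early return made explicit:

    static void soc21_program_aspm(struct amdgpu_device *adev)
    {
        if (!amdgpu_device_should_use_aspm(adev))
            return;

        /* APUs have no PCIe link to tune; dGPUs defer to the NBIO block */
        if (!(adev->flags & AMD_IS_APU) &&
            (adev->nbio.funcs->program_aspm))
            adev->nbio.funcs->program_aspm(adev);
    }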
448 static bool soc21_need_full_reset(struct amdgpu_device *adev)
450 switch (adev->ip_versions[GC_HWIP][0]) {
452 return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
461 static bool soc21_need_reset_on_init(struct amdgpu_device *adev)
465 if (adev->flags & AMD_IS_APU)
478 static void soc21_init_doorbell_index(struct amdgpu_device *adev)
480 adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
481 adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
482 adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
483 adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
484 adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
485 adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
486 adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
487 adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
488 adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
489 adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
490 adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
491 adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
492 adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
493 adev->doorbell_index.gfx_userqueue_start =
495 adev->doorbell_index.gfx_userqueue_end =
497 adev->doorbell_index.mes_ring0 = AMDGPU_NAVI10_DOORBELL_MES_RING0;
498 adev->doorbell_index.mes_ring1 = AMDGPU_NAVI10_DOORBELL_MES_RING1;
499 adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
500 adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
501 adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
502 adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
503 adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
504 adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
505 adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
506 adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
507 adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;
509 adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
510 adev->doorbell_index.sdma_doorbell_range = 20;
513 static void soc21_pre_asic_init(struct amdgpu_device *adev)
517 static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
521 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
523 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
525 if (adev->gfx.funcs->update_perfmon_mgcg)
526 adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
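Assembled, the safe-mode bracketing and the perfmon MGCG toggle likely read as follows (the return value is assumed):

    static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
                                              bool enter)
    {
        if (enter)
            amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
        else
            amdgpu_gfx_rlc_exit_safe_mode(adev, 0);

        /* perfmon MGCG is disabled while UMD holds the stable pstate */
        if (adev->gfx.funcs->update_perfmon_mgcg)
            adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

        return 0;
    }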
554 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
556 adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
557 adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
558 adev->smc_rreg = NULL;
559 adev->smc_wreg = NULL;
560 adev->pcie_rreg = &amdgpu_device_indirect_rreg;
561 adev->pcie_wreg = &amdgpu_device_indirect_wreg;
562 adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
563 adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
564 adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
565 adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
568 adev->uvd_ctx_rreg = NULL;
569 adev->uvd_ctx_wreg = NULL;
571 adev->didt_rreg = &soc21_didt_rreg;
572 adev->didt_wreg = &soc21_didt_wreg;
574 adev->asic_funcs = &soc21_asic_funcs;
576 adev->rev_id = amdgpu_device_get_rev_id(adev);
577 adev->external_rev_id = 0xff;
578 switch (adev->ip_versions[GC_HWIP][0]) {
580 adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
598 adev->pg_flags = AMD_PG_SUPPORT_VCN |
603 adev->external_rev_id = adev->rev_id + 0x1; // TODO: need update
606 adev->cg_flags =
616 adev->pg_flags =
622 adev->external_rev_id = adev->rev_id + 0x10;
625 adev->cg_flags =
643 adev->pg_flags =
648 adev->external_rev_id = adev->rev_id + 0x1;
651 adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
660 adev->pg_flags = AMD_PG_SUPPORT_VCN |
663 adev->external_rev_id = adev->rev_id + 0x20;
666 adev->cg_flags =
684 adev->pg_flags = AMD_PG_SUPPORT_VCN |
688 adev->external_rev_id = adev->rev_id + 0x80;
696 if (amdgpu_sriov_vf(adev)) {
697 amdgpu_virt_init_setting(adev);
698 xgpu_nv_mailbox_set_irq_funcs(adev);
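The long assignment block above wires per-ASIC register accessors into amdgpu_device; IP code then reaches them through wrapper macros rather than calling soc21 functions directly. A sketch of the consumer side, with the macro shapes assumed from amdgpu.h:

    /* shapes assumed; shown for orientation only */
    #define RREG32_PCIE(reg)       adev->pcie_rreg(adev, (reg))
    #define WREG32_PCIE(reg, v)    adev->pcie_wreg(adev, (reg), (v))

    u32 v = RREG32_PCIE(offset);        /* routed to amdgpu_device_indirect_rreg */
    WREG32_PCIE(offset, v | some_bit);  /* routed to amdgpu_device_indirect_wreg */

Because soc21_common_early_init() fills these callbacks with the generic indirect accessors, IP-level code stays ASIC-agnostic.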
706 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
708 if (amdgpu_sriov_vf(adev)) {
709 xgpu_nv_mailbox_get_irq(adev);
710 if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
711 !amdgpu_sriov_is_av1_support(adev)) {
712 amdgpu_virt_update_sriov_video_codec(adev,
718 amdgpu_virt_update_sriov_video_codec(adev,
725 if (adev->nbio.ras &&
726 adev->nbio.ras_err_event_athub_irq.funcs)
731 amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
737 adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
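A sketch of the late-init branch structure these lines imply; the SR-IOV codec arrays (sriov_encode_vcn*/sriov_decode_vcn*) are placeholder names:

    static int soc21_common_late_init(void *handle)
    {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev)) {
            xgpu_nv_mailbox_get_irq(adev);
            /* advertise the codec set matching the harvest/AV1 state */
            if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
                !amdgpu_sriov_is_av1_support(adev))
                amdgpu_virt_update_sriov_video_codec(adev,
                        sriov_encode_vcn1, ARRAY_SIZE(sriov_encode_vcn1),
                        sriov_decode_vcn1, ARRAY_SIZE(sriov_decode_vcn1));
            else
                amdgpu_virt_update_sriov_video_codec(adev,
                        sriov_encode_vcn0, ARRAY_SIZE(sriov_encode_vcn0),
                        sriov_decode_vcn0, ARRAY_SIZE(sriov_decode_vcn0));
        } else {
            if (adev->nbio.ras &&
                adev->nbio.ras_err_event_athub_irq.funcs)
                /* no graceful-failure handling needed for this irq */
                amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
        }

        adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);

        return 0;
    }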
744 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
746 if (amdgpu_sriov_vf(adev))
747 xgpu_nv_mailbox_add_irq_id(adev);
759 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
762 soc21_program_aspm(adev);
764 adev->nbio.funcs->init_registers(adev);
769 if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
770 adev->nbio.funcs->remap_hdp_registers(adev);
772 adev->nbio.funcs->enable_doorbell_aperture(adev, true);
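Assembled into the likely hw_init flow (comments are interpretive):

    static int soc21_common_hw_init(void *handle)
    {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* enable ASPM, then program baseline NBIO registers */
        soc21_program_aspm(adev);
        adev->nbio.funcs->init_registers(adev);
        /* remap HDP registers into the MMIO hole so doorbell-side flushes
         * work; skipped under SR-IOV where the host owns the mapping */
        if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
            adev->nbio.funcs->remap_hdp_registers(adev);

        /* enable the doorbell aperture */
        adev->nbio.funcs->enable_doorbell_aperture(adev, true);

        return 0;
    }

soc21_common_hw_fini is the mirror image: it disables both doorbell apertures and drops the SR-IOV mailbox irq or the RAS athub irq, as the matched lines below show.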
779 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
786 adev->nbio.funcs->enable_doorbell_aperture(adev, false);
787 adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
789 if (amdgpu_sriov_vf(adev)) {
790 xgpu_nv_mailbox_put_irq(adev);
792 if (adev->nbio.ras &&
793 adev->nbio.ras_err_event_athub_irq.funcs)
794 amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
802 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
804 return soc21_common_hw_fini(adev);
809 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
811 return soc21_common_hw_init(adev);
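Suspend and resume simply delegate to hw_fini/hw_init, so the common block keeps a single teardown/bring-up path:

    static int soc21_common_suspend(void *handle)
    {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return soc21_common_hw_fini(adev);
    }

    static int soc21_common_resume(void *handle)
    {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return soc21_common_hw_init(adev);
    }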
832 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
834 switch (adev->ip_versions[NBIO_HWIP][0]) {
838 adev->nbio.funcs->update_medium_grain_clock_gating(adev,
840 adev->nbio.funcs->update_medium_grain_light_sleep(adev,
842 adev->hdp.funcs->update_clock_gating(adev,
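The gating update fans out to the NBIO and HDP callbacks, keyed on NBIO IP version. A sketch (the case label is illustrative, not the full list):

    static int soc21_common_set_clockgating_state(void *handle,
                                                  enum amd_clockgating_state state)
    {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        switch (adev->ip_versions[NBIO_HWIP][0]) {
        case IP_VERSION(4, 3, 0):   /* illustrative case label */
            adev->nbio.funcs->update_medium_grain_clock_gating(adev,
                    state == AMD_CG_STATE_GATE);
            adev->nbio.funcs->update_medium_grain_light_sleep(adev,
                    state == AMD_CG_STATE_GATE);
            adev->hdp.funcs->update_clock_gating(adev,
                    state == AMD_CG_STATE_GATE);
            break;
        default:
            break;
        }
        return 0;
    }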
854 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
856 switch (adev->ip_versions[LSDMA_HWIP][0]) {
859 adev->lsdma.funcs->update_memory_power_gating(adev,
871 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
873 adev->nbio.funcs->get_clockgating_state(adev, flags);
875 adev->hdp.funcs->get_clock_gating_state(adev, flags);