Lines Matching defs:rdev
125 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
126 static void si_pcie_gen3_enable(struct radeon_device *rdev);
127 static void si_program_aspm(struct radeon_device *rdev);
128 extern void sumo_rlc_fini(struct radeon_device *rdev);
129 extern int sumo_rlc_init(struct radeon_device *rdev);
130 extern int r600_ih_ring_alloc(struct radeon_device *rdev);
131 extern void r600_ih_ring_fini(struct radeon_device *rdev);
132 extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
133 extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
134 extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
135 extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
136 extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
137 extern bool evergreen_is_display_hung(struct radeon_device *rdev);
138 static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
140 static void si_init_pg(struct radeon_device *rdev);
141 static void si_init_cg(struct radeon_device *rdev);
142 static void si_fini_pg(struct radeon_device *rdev);
143 static void si_fini_cg(struct radeon_device *rdev);
144 static void si_rlc_stop(struct radeon_device *rdev);
1233 static void si_init_golden_registers(struct radeon_device *rdev)
1235 switch (rdev->family) {
1237 radeon_program_register_sequence(rdev,
1240 radeon_program_register_sequence(rdev,
1243 radeon_program_register_sequence(rdev,
1246 radeon_program_register_sequence(rdev,
1251 radeon_program_register_sequence(rdev,
1254 radeon_program_register_sequence(rdev,
1257 radeon_program_register_sequence(rdev,
1262 radeon_program_register_sequence(rdev,
1265 radeon_program_register_sequence(rdev,
1268 radeon_program_register_sequence(rdev,
1271 radeon_program_register_sequence(rdev,
1276 radeon_program_register_sequence(rdev,
1279 radeon_program_register_sequence(rdev,
1282 radeon_program_register_sequence(rdev,
1287 radeon_program_register_sequence(rdev,
1290 radeon_program_register_sequence(rdev,
1293 radeon_program_register_sequence(rdev,
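All of the si_init_golden_registers() calls above feed radeon_program_register_sequence(), which walks a flat array of (offset, and_mask, or_mask) triplets. A minimal sketch, assuming a hypothetical table rather than one of the real per-chip arrays:

    /* hypothetical golden-register table; the real tables live next to
     * si_init_golden_registers() and use the same triplet layout */
    static const u32 example_golden_registers[] = {
            /* offset   and_mask    or_mask */
            0x9a10, 0x00010000, 0x00018208,
            0x9830, 0xffffffff, 0x00000000,
    };

    radeon_program_register_sequence(rdev, example_golden_registers,
                                     (u32)ARRAY_SIZE(example_golden_registers));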
1305 * @rdev: radeon_device pointer
1312 int si_get_allowed_info_register(struct radeon_device *rdev,
1338 * @rdev: radeon_device pointer
1343 u32 si_get_xclk(struct radeon_device *rdev)
1345 u32 reference_clock = rdev->clock.spll.reference_freq;
1360 int si_get_temp(struct radeon_device *rdev)
1576 int si_mc_load_microcode(struct radeon_device *rdev)
1585 if (!rdev->mc_fw)
1588 if (rdev->new_fw) {
1590 (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
1595 (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
1598 (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1600 ucode_size = rdev->mc_fw->size / 4;
1602 switch (rdev->family) {
1625 fw_data = (const __be32 *)rdev->mc_fw->data;
1637 if (rdev->new_fw) {
1647 if (rdev->new_fw)
1659 for (i = 0; i < rdev->usec_timeout; i++) {
1664 for (i = 0; i < rdev->usec_timeout; i++) {
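The rdev->new_fw branches above (1588/1637/1647) implement the driver's dual firmware format: images carrying a parsed little-endian header versus legacy raw big-endian word streams. A condensed sketch of that split in si_mc_load_microcode(), following the mc_firmware_header_v1_0 usage shown:

    if (rdev->new_fw) {
            const struct mc_firmware_header_v1_0 *hdr =
                    (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
            const __le32 *fw_data = (const __le32 *)
                    (rdev->mc_fw->data +
                     le32_to_cpu(hdr->header.ucode_array_offset_bytes));
            ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
            /* words are consumed with le32_to_cpu() */
    } else {
            const __be32 *fw_data = (const __be32 *)rdev->mc_fw->data;
            ucode_size = rdev->mc_fw->size / 4;
            /* words are consumed with be32_to_cpu() */
    }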
1674 static int si_init_microcode(struct radeon_device *rdev)
1689 switch (rdev->family) {
1703 if ((rdev->pdev->revision == 0x81) &&
1704 ((rdev->pdev->device == 0x6810) ||
1705 (rdev->pdev->device == 0x6811)))
1718 if (((rdev->pdev->device == 0x6820) &&
1719 ((rdev->pdev->revision == 0x81) ||
1720 (rdev->pdev->revision == 0x83))) ||
1721 ((rdev->pdev->device == 0x6821) &&
1722 ((rdev->pdev->revision == 0x83) ||
1723 (rdev->pdev->revision == 0x87))) ||
1724 ((rdev->pdev->revision == 0x87) &&
1725 ((rdev->pdev->device == 0x6823) ||
1726 (rdev->pdev->device == 0x682b))))
1739 if (((rdev->pdev->revision == 0x81) &&
1740 ((rdev->pdev->device == 0x6600) ||
1741 (rdev->pdev->device == 0x6604) ||
1742 (rdev->pdev->device == 0x6605) ||
1743 (rdev->pdev->device == 0x6610))) ||
1744 ((rdev->pdev->revision == 0x83) &&
1745 (rdev->pdev->device == 0x6610)))
1757 if (((rdev->pdev->revision == 0x81) &&
1758 (rdev->pdev->device == 0x6660)) ||
1759 ((rdev->pdev->revision == 0x83) &&
1760 ((rdev->pdev->device == 0x6660) ||
1761 (rdev->pdev->device == 0x6663) ||
1762 (rdev->pdev->device == 0x6665) ||
1763 (rdev->pdev->device == 0x6667))))
1765 else if ((rdev->pdev->revision == 0xc3) &&
1766 (rdev->pdev->device == 0x6665))
1786 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1789 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1792 if (rdev->pfp_fw->size != pfp_req_size) {
1794 rdev->pfp_fw->size, fw_name);
1799 err = radeon_ucode_validate(rdev->pfp_fw);
1810 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1813 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1816 if (rdev->me_fw->size != me_req_size) {
1818 rdev->me_fw->size, fw_name);
1822 err = radeon_ucode_validate(rdev->me_fw);
1833 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1836 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1839 if (rdev->ce_fw->size != ce_req_size) {
1841 rdev->ce_fw->size, fw_name);
1845 err = radeon_ucode_validate(rdev->ce_fw);
1856 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1859 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1862 if (rdev->rlc_fw->size != rlc_req_size) {
1864 rdev->rlc_fw->size, fw_name);
1868 err = radeon_ucode_validate(rdev->rlc_fw);
1882 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1885 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1888 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1892 if ((rdev->mc_fw->size != mc_req_size) &&
1893 (rdev->mc_fw->size != mc2_req_size)) {
1895 rdev->mc_fw->size, fw_name);
1898 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
1900 err = radeon_ucode_validate(rdev->mc_fw);
1916 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1919 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1922 release_firmware(rdev->smc_fw);
1923 rdev->smc_fw = NULL;
1925 } else if (rdev->smc_fw->size != smc_req_size) {
1927 rdev->smc_fw->size, fw_name);
1931 err = radeon_ucode_validate(rdev->smc_fw);
1942 rdev->new_fw = false;
1947 rdev->new_fw = true;
1954 release_firmware(rdev->pfp_fw);
1955 rdev->pfp_fw = NULL;
1956 release_firmware(rdev->me_fw);
1957 rdev->me_fw = NULL;
1958 release_firmware(rdev->ce_fw);
1959 rdev->ce_fw = NULL;
1960 release_firmware(rdev->rlc_fw);
1961 rdev->rlc_fw = NULL;
1962 release_firmware(rdev->mc_fw);
1963 rdev->mc_fw = NULL;
1964 release_firmware(rdev->smc_fw);
1965 rdev->smc_fw = NULL;
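Each request_firmware() pair above is the same fallback idiom: try the new-style name first, then the legacy one, and on any unrecoverable failure unwind every blob (the release_firmware()/NULL pairs at 1954-1965). A sketch of one fetch; fw_name, new_chip_name and chip_name are locals assumed from the surrounding function:

    snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
    err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
    if (err) {
            snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
            err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
            if (err)
                    goto out;   /* out: releases all blobs, NULLs the pointers */
    }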
1971 static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
2009 for (i = 0; i < rdev->usec_timeout; i++) {
2030 static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
2298 static void dce6_program_watermarks(struct radeon_device *rdev,
2323 if (rdev->family == CHIP_ARUBA)
2324 dram_channels = evergreen_get_number_of_dram_channels(rdev);
2326 dram_channels = si_get_number_of_dram_channels(rdev);
2329 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2331 radeon_dpm_get_mclk(rdev, false) * 10;
2333 radeon_dpm_get_sclk(rdev, false) * 10;
2335 wm_high.yclk = rdev->pm.current_mclk * 10;
2336 wm_high.sclk = rdev->pm.current_sclk * 10;
2356 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2358 radeon_dpm_get_mclk(rdev, true) * 10;
2360 radeon_dpm_get_sclk(rdev, true) * 10;
2362 wm_low.yclk = rdev->pm.current_mclk * 10;
2363 wm_low.sclk = rdev->pm.current_sclk * 10;
2392 (rdev->disp_priority == 2)) {
2400 (rdev->disp_priority == 2)) {
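The DPM checks at 2329/2356 select the watermark clocks: DPM-reported mclk/sclk when dynamic power management is active, otherwise the fixed current clocks, both scaled to 10 kHz units. Reconstructed from the fragments above for the high-watermark case:

    if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
            wm_high.yclk = radeon_dpm_get_mclk(rdev, false) * 10;
            wm_high.sclk = radeon_dpm_get_sclk(rdev, false) * 10;
    } else {
            wm_high.yclk = rdev->pm.current_mclk * 10;
            wm_high.sclk = rdev->pm.current_sclk * 10;
    }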
2464 void dce6_bandwidth_update(struct radeon_device *rdev)
2471 if (!rdev->mode_info.mode_config_initialized)
2474 radeon_update_display_priority(rdev);
2476 for (i = 0; i < rdev->num_crtc; i++) {
2477 if (rdev->mode_info.crtcs[i]->base.enabled)
2480 for (i = 0; i < rdev->num_crtc; i += 2) {
2481 mode0 = &rdev->mode_info.crtcs[i]->base.mode;
2482 mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
2483 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
2484 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
2485 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
2486 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
2493 static void si_tiling_mode_table_init(struct radeon_device *rdev)
2495 u32 *tile = rdev->config.si.tile_mode_array;
2497 ARRAY_SIZE(rdev->config.si.tile_mode_array);
2500 switch (rdev->config.si.mem_row_size_in_kb) {
2516 switch (rdev->family) {
2947 DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
2951 static void si_select_se_sh(struct radeon_device *rdev,
2978 static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
2996 static void si_setup_spi(struct radeon_device *rdev,
3005 si_select_se_sh(rdev, i, j);
3007 active_cu = si_get_cu_enabled(rdev, cu_per_sh);
3020 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3023 static u32 si_get_rb_disabled(struct radeon_device *rdev,
3043 static void si_setup_rb(struct radeon_device *rdev,
3054 si_select_se_sh(rdev, i, j);
3055 data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
3059 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3068 rdev->config.si.backend_enable_mask = enabled_rbs;
3071 si_select_se_sh(rdev, i, 0xffffffff);
3090 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
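si_setup_rb() above is a scatter/gather pass: select each SE/SH pair, read its disabled-render-backend bits, then invert the accumulated mask into the enabled set cached at 3068. A sketch of the aggregation inside the function, assuming TAHITI_RB_BITMAP_WIDTH_PER_SH as the per-SH bit stride:

    for (i = 0; i < se_num; i++) {
            for (j = 0; j < sh_per_se; j++) {
                    si_select_se_sh(rdev, i, j);
                    data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
                    disabled_rbs |= data << ((i * sh_per_se + j) *
                                             TAHITI_RB_BITMAP_WIDTH_PER_SH);
            }
    }
    si_select_se_sh(rdev, 0xffffffff, 0xffffffff);

    mask = 1;
    for (i = 0; i < max_rb_num_per_se * se_num; i++) {
            if (!(disabled_rbs & mask))
                    enabled_rbs |= mask;
            mask <<= 1;
    }
    rdev->config.si.backend_enable_mask = enabled_rbs;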
3093 static void si_gpu_init(struct radeon_device *rdev)
3102 switch (rdev->family) {
3104 rdev->config.si.max_shader_engines = 2;
3105 rdev->config.si.max_tile_pipes = 12;
3106 rdev->config.si.max_cu_per_sh = 8;
3107 rdev->config.si.max_sh_per_se = 2;
3108 rdev->config.si.max_backends_per_se = 4;
3109 rdev->config.si.max_texture_channel_caches = 12;
3110 rdev->config.si.max_gprs = 256;
3111 rdev->config.si.max_gs_threads = 32;
3112 rdev->config.si.max_hw_contexts = 8;
3114 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3115 rdev->config.si.sc_prim_fifo_size_backend = 0x100;
3116 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3117 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3121 rdev->config.si.max_shader_engines = 2;
3122 rdev->config.si.max_tile_pipes = 8;
3123 rdev->config.si.max_cu_per_sh = 5;
3124 rdev->config.si.max_sh_per_se = 2;
3125 rdev->config.si.max_backends_per_se = 4;
3126 rdev->config.si.max_texture_channel_caches = 8;
3127 rdev->config.si.max_gprs = 256;
3128 rdev->config.si.max_gs_threads = 32;
3129 rdev->config.si.max_hw_contexts = 8;
3131 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3132 rdev->config.si.sc_prim_fifo_size_backend = 0x100;
3133 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3134 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3139 rdev->config.si.max_shader_engines = 1;
3140 rdev->config.si.max_tile_pipes = 4;
3141 rdev->config.si.max_cu_per_sh = 5;
3142 rdev->config.si.max_sh_per_se = 2;
3143 rdev->config.si.max_backends_per_se = 4;
3144 rdev->config.si.max_texture_channel_caches = 4;
3145 rdev->config.si.max_gprs = 256;
3146 rdev->config.si.max_gs_threads = 32;
3147 rdev->config.si.max_hw_contexts = 8;
3149 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3150 rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3151 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3152 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3156 rdev->config.si.max_shader_engines = 1;
3157 rdev->config.si.max_tile_pipes = 4;
3158 rdev->config.si.max_cu_per_sh = 6;
3159 rdev->config.si.max_sh_per_se = 1;
3160 rdev->config.si.max_backends_per_se = 2;
3161 rdev->config.si.max_texture_channel_caches = 4;
3162 rdev->config.si.max_gprs = 256;
3163 rdev->config.si.max_gs_threads = 16;
3164 rdev->config.si.max_hw_contexts = 8;
3166 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3167 rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3168 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3169 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3173 rdev->config.si.max_shader_engines = 1;
3174 rdev->config.si.max_tile_pipes = 4;
3175 rdev->config.si.max_cu_per_sh = 5;
3176 rdev->config.si.max_sh_per_se = 1;
3177 rdev->config.si.max_backends_per_se = 1;
3178 rdev->config.si.max_texture_channel_caches = 2;
3179 rdev->config.si.max_gprs = 256;
3180 rdev->config.si.max_gs_threads = 16;
3181 rdev->config.si.max_hw_contexts = 8;
3183 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3184 rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3185 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3186 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3204 evergreen_fix_pci_max_read_req_size(rdev);
3211 rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
3212 rdev->config.si.mem_max_burst_length_bytes = 256;
3214 rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
3215 if (rdev->config.si.mem_row_size_in_kb > 4)
3216 rdev->config.si.mem_row_size_in_kb = 4;
3218 rdev->config.si.shader_engine_tile_size = 32;
3219 rdev->config.si.num_gpus = 1;
3220 rdev->config.si.multi_gpu_tile_size = 64;
3224 switch (rdev->config.si.mem_row_size_in_kb) {
3244 rdev->config.si.tile_config = 0;
3245 switch (rdev->config.si.num_tile_pipes) {
3247 rdev->config.si.tile_config |= (0 << 0);
3250 rdev->config.si.tile_config |= (1 << 0);
3253 rdev->config.si.tile_config |= (2 << 0);
3258 rdev->config.si.tile_config |= (3 << 0);
3263 rdev->config.si.tile_config |= 0 << 4;
3266 rdev->config.si.tile_config |= 1 << 4;
3270 rdev->config.si.tile_config |= 2 << 4;
3273 rdev->config.si.tile_config |=
3275 rdev->config.si.tile_config |=
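The |= statements truncated at 3273/3275 append two fields decoded from GB_ADDR_CONFIG; completed, the tile_config packing reads as below (mask and shift names are assumed from the matching code elsewhere in the SI/Cayman family):

    rdev->config.si.tile_config |=
            ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >>
             PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
    rdev->config.si.tile_config |=
            ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;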
3284 if (rdev->has_uvd) {
3290 si_tiling_mode_table_init(rdev);
3292 si_setup_rb(rdev, rdev->config.si.max_shader_engines,
3293 rdev->config.si.max_sh_per_se,
3294 rdev->config.si.max_backends_per_se);
3296 si_setup_spi(rdev, rdev->config.si.max_shader_engines,
3297 rdev->config.si.max_sh_per_se,
3298 rdev->config.si.max_cu_per_sh);
3300 rdev->config.si.active_cus = 0;
3301 for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
3302 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
3303 rdev->config.si.active_cus +=
3304 hweight32(si_get_cu_active_bitmap(rdev, i, j));
3318 WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
3319 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
3320 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
3321 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));
3362 static void si_scratch_init(struct radeon_device *rdev)
3366 rdev->scratch.num_reg = 7;
3367 rdev->scratch.reg_base = SCRATCH_REG0;
3368 for (i = 0; i < rdev->scratch.num_reg; i++) {
3369 rdev->scratch.free[i] = true;
3370 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
3374 void si_fence_ring_emit(struct radeon_device *rdev,
3377 struct radeon_ring *ring = &rdev->ring[fence->ring];
3378 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3404 void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3406 struct radeon_ring *ring = &rdev->ring[ib->ring];
3424 } else if (rdev->wb.enabled) {
3464 static void si_cp_enable(struct radeon_device *rdev, bool enable)
3469 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
3470 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
3473 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3474 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3475 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3480 static int si_cp_load_microcode(struct radeon_device *rdev)
3484 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
3487 si_cp_enable(rdev, false);
3489 if (rdev->new_fw) {
3491 (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
3493 (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
3495 (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
3505 (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3514 (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3523 (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3533 fw_data = (const __be32 *)rdev->pfp_fw->data;
3540 fw_data = (const __be32 *)rdev->ce_fw->data;
3547 fw_data = (const __be32 *)rdev->me_fw->data;
3561 static int si_cp_start(struct radeon_device *rdev)
3563 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3566 r = radeon_ring_lock(rdev, ring, 7 + 4);
3575 radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
3585 radeon_ring_unlock_commit(rdev, ring, false);
3587 si_cp_enable(rdev, true);
3589 r = radeon_ring_lock(rdev, ring, si_default_size + 10);
3614 radeon_ring_unlock_commit(rdev, ring, false);
3617 ring = &rdev->ring[i];
3618 r = radeon_ring_lock(rdev, ring, 2);
3628 radeon_ring_unlock_commit(rdev, ring, false);
3634 static void si_cp_fini(struct radeon_device *rdev)
3637 si_cp_enable(rdev, false);
3639 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3640 radeon_ring_fini(rdev, ring);
3641 radeon_scratch_free(rdev, ring->rptr_save_reg);
3643 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3644 radeon_ring_fini(rdev, ring);
3645 radeon_scratch_free(rdev, ring->rptr_save_reg);
3647 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3648 radeon_ring_fini(rdev, ring);
3649 radeon_scratch_free(rdev, ring->rptr_save_reg);
3652 static int si_cp_resume(struct radeon_device *rdev)
3659 si_enable_gui_idle_interrupt(rdev, false);
3668 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
3672 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3686 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
3687 WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
3689 if (rdev->wb.enabled)
3703 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3717 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
3718 WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
3727 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3741 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
3742 WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
3750 si_cp_start(rdev);
3751 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
3752 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
3753 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
3754 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
3756 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3757 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3758 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3761 r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
3763 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3765 r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
3767 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3770 si_enable_gui_idle_interrupt(rdev, true);
3772 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
3773 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
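si_cp_resume() programs the three rings (RB0/RB1/RB2) with one recipe: a log2-encoded size in CP_RBx_CNTL, the ring base, and the write-back rptr address pair listed at 3686/3717/3741. A sketch of the RB0 case under those assumptions:

    rb_bufsz = order_base_2(ring->ring_size / 8);
    tmp = (order_base_2(RADEON_GPU_PAGE_SIZE / 8) << 8) | rb_bufsz;
    WREG32(CP_RB0_CNTL, tmp);
    WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
    WREG32(CP_RB0_RPTR_ADDR,
           (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
    WREG32(CP_RB0_RPTR_ADDR_HI,
           upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);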
3778 u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
3842 if (evergreen_is_display_hung(rdev))
3859 static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
3868 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
3870 evergreen_print_gpu_status_regs(rdev);
3871 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
3873 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3877 si_fini_pg(rdev);
3878 si_fini_cg(rdev);
3881 si_rlc_stop(rdev);
3901 evergreen_mc_stop(rdev, &save);
3902 if (evergreen_mc_wait_for_idle(rdev)) {
3903 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
3957 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3971 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
3985 evergreen_mc_resume(rdev, &save);
3988 evergreen_print_gpu_status_regs(rdev);
3991 static void si_set_clk_bypass_mode(struct radeon_device *rdev)
4003 for (i = 0; i < rdev->usec_timeout; i++) {
4018 static void si_spll_powerdown(struct radeon_device *rdev)
4039 static void si_gpu_pci_config_reset(struct radeon_device *rdev)
4044 dev_info(rdev->dev, "GPU pci config reset\n");
4049 si_fini_pg(rdev);
4050 si_fini_cg(rdev);
4065 si_rlc_stop(rdev);
4070 evergreen_mc_stop(rdev, &save);
4071 if (evergreen_mc_wait_for_idle(rdev)) {
4072 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
4076 si_set_clk_bypass_mode(rdev);
4078 si_spll_powerdown(rdev);
4080 pci_clear_master(rdev->pdev);
4082 radeon_pci_config_reset(rdev);
4084 for (i = 0; i < rdev->usec_timeout; i++) {
4091 int si_asic_reset(struct radeon_device *rdev, bool hard)
4096 si_gpu_pci_config_reset(rdev);
4100 reset_mask = si_gpu_check_soft_reset(rdev);
4103 r600_set_bios_scratch_engine_hung(rdev, true);
4106 si_gpu_soft_reset(rdev, reset_mask);
4108 reset_mask = si_gpu_check_soft_reset(rdev);
4112 si_gpu_pci_config_reset(rdev);
4114 reset_mask = si_gpu_check_soft_reset(rdev);
4117 r600_set_bios_scratch_engine_hung(rdev, false);
4125 * @rdev: radeon_device pointer
4131 bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4133 u32 reset_mask = si_gpu_check_soft_reset(rdev);
4138 radeon_ring_lockup_update(rdev, ring);
4141 return radeon_ring_test_lockup(rdev, ring);
4145 static void si_mc_program(struct radeon_device *rdev)
4161 evergreen_mc_stop(rdev, &save);
4162 if (radeon_mc_wait_for_idle(rdev)) {
4163 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
4165 if (!ASIC_IS_NODCE(rdev))
4170 rdev->mc.vram_start >> 12);
4172 rdev->mc.vram_end >> 12);
4174 rdev->vram_scratch.gpu_addr >> 12);
4175 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
4176 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
4179 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
4185 if (radeon_mc_wait_for_idle(rdev)) {
4186 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
4188 evergreen_mc_resume(rdev, &save);
4189 if (!ASIC_IS_NODCE(rdev)) {
4192 rv515_vga_render_disable(rdev);
4196 void si_vram_gtt_location(struct radeon_device *rdev,
4201 dev_warn(rdev->dev, "limiting VRAM\n");
4205 radeon_vram_location(rdev, &rdev->mc, 0);
4206 rdev->mc.gtt_base_align = 0;
4207 radeon_gtt_location(rdev, mc);
4210 static int si_mc_init(struct radeon_device *rdev)
4216 rdev->mc.vram_is_ddr = true;
4256 rdev->mc.vram_width = numchan * chansize;
4258 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
4259 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
4268 rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
4269 rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
4270 rdev->mc.visible_vram_size = rdev->mc.aper_size;
4271 si_vram_gtt_location(rdev, &rdev->mc);
4272 radeon_update_bandwidth_info(rdev);
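The size computation at 4268 trusts CONFIG_MEMSIZE only partially: some boards report garbage in the upper 16 bits, so the low half is kept before converting MiB to bytes. Reconstructed context for si_mc_init():

    tmp = RREG32(CONFIG_MEMSIZE);
    /* some boards may have garbage in the upper 16 bits */
    if (tmp & 0xffff0000) {
            DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
            if (tmp & 0xffff)
                    tmp &= 0xffff;
    }
    rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
    rdev->mc.real_vram_size = rdev->mc.mc_vram_size;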
4280 void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
4289 static int si_pcie_gart_enable(struct radeon_device *rdev)
4293 if (rdev->gart.robj == NULL) {
4294 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
4297 r = radeon_gart_table_vram_pin(rdev);
4320 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
4321 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
4322 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
4324 (u32)(rdev->dummy_page.addr >> 12));
4336 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
4344 rdev->vm_manager.saved_table_addr[i]);
4347 rdev->vm_manager.saved_table_addr[i]);
4352 (u32)(rdev->dummy_page.addr >> 12));
4369 si_pcie_gart_tlb_flush(rdev);
4371 (unsigned)(rdev->mc.gtt_size >> 20),
4372 (unsigned long long)rdev->gart.table_addr);
4373 rdev->gart.ready = true;
4377 static void si_pcie_gart_disable(struct radeon_device *rdev)
4387 rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
4404 radeon_gart_table_vram_unpin(rdev);
4407 static void si_pcie_gart_fini(struct radeon_device *rdev)
4409 si_pcie_gart_disable(rdev);
4410 radeon_gart_table_vram_free(rdev);
4411 radeon_gart_fini(rdev);
4461 static int si_vm_packet3_ce_check(struct radeon_device *rdev,
4534 static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
4652 static int si_vm_packet3_compute_check(struct radeon_device *rdev,
4740 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
4753 dev_err(rdev->dev, "Packet0 not allowed!\n");
4762 ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
4766 ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
4770 ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
4773 dev_err(rdev->dev, "Non-PM4 ring %d!\n", ib->ring);
4781 dev_err(rdev->dev, "Unknown packet type %d!\n", pkt.type);
4802 int si_vm_init(struct radeon_device *rdev)
4805 rdev->vm_manager.nvm = 16;
4807 rdev->vm_manager.vram_base_offset = 0;
4812 void si_vm_fini(struct radeon_device *rdev)
4819 * @rdev: radeon_device pointer
4825 static void si_vm_decode_fault(struct radeon_device *rdev,
4833 if (rdev->family == CHIP_TAHITI) {
5080 void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
5132 static void si_wait_for_rlc_serdes(struct radeon_device *rdev)
5136 for (i = 0; i < rdev->usec_timeout; i++) {
5142 for (i = 0; i < rdev->usec_timeout; i++) {
5149 static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
5167 for (i = 0; i < rdev->usec_timeout; i++) {
5175 static void si_set_uvd_dcm(struct radeon_device *rdev,
5196 void si_init_uvd_internal_cg(struct radeon_device *rdev)
5201 si_set_uvd_dcm(rdev, false);
5209 static u32 si_halt_rlc(struct radeon_device *rdev)
5219 si_wait_for_rlc_serdes(rdev);
5225 static void si_update_rlc(struct radeon_device *rdev, u32 rlc)
5234 static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
5239 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
5247 static void si_init_dma_pg(struct radeon_device *rdev)
5258 static void si_enable_gfx_cgpg(struct radeon_device *rdev,
5263 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
5283 static void si_init_gfx_cgpg(struct radeon_device *rdev)
5287 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5293 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5303 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
5308 si_select_se_sh(rdev, se, sh);
5311 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5318 for (i = 0; i < rdev->config.si.max_cu_per_sh; i++) {
5326 static void si_init_ao_cu_mask(struct radeon_device *rdev)
5332 for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
5333 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
5337 for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
5338 if (si_get_cu_active_bitmap(rdev, i, j) & mask) {
5359 static void si_enable_cgcg(struct radeon_device *rdev,
5366 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
5367 si_enable_gui_idle_interrupt(rdev, true);
5371 tmp = si_halt_rlc(rdev);
5377 si_wait_for_rlc_serdes(rdev);
5379 si_update_rlc(rdev, tmp);
5385 si_enable_gui_idle_interrupt(rdev, false);
5399 static void si_enable_mgcg(struct radeon_device *rdev,
5404 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
5410 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
5422 tmp = si_halt_rlc(rdev);
5428 si_update_rlc(rdev, tmp);
5445 tmp = si_halt_rlc(rdev);
5451 si_update_rlc(rdev, tmp);
5455 static void si_enable_uvd_mgcg(struct radeon_device *rdev,
5460 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
5500 static void si_enable_mc_ls(struct radeon_device *rdev,
5508 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
5517 static void si_enable_mc_mgcg(struct radeon_device *rdev,
5525 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
5534 static void si_enable_dma_mgcg(struct radeon_device *rdev,
5540 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
5571 static void si_enable_bif_mgls(struct radeon_device *rdev,
5578 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
5589 static void si_enable_hdp_mgcg(struct radeon_device *rdev,
5596 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
5605 static void si_enable_hdp_ls(struct radeon_device *rdev,
5612 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
5621 static void si_update_cg(struct radeon_device *rdev,
5625 si_enable_gui_idle_interrupt(rdev, false);
5628 si_enable_mgcg(rdev, true);
5629 si_enable_cgcg(rdev, true);
5631 si_enable_cgcg(rdev, false);
5632 si_enable_mgcg(rdev, false);
5634 si_enable_gui_idle_interrupt(rdev, true);
5638 si_enable_mc_mgcg(rdev, enable);
5639 si_enable_mc_ls(rdev, enable);
5643 si_enable_dma_mgcg(rdev, enable);
5647 si_enable_bif_mgls(rdev, enable);
5651 if (rdev->has_uvd) {
5652 si_enable_uvd_mgcg(rdev, enable);
5657 si_enable_hdp_mgcg(rdev, enable);
5658 si_enable_hdp_ls(rdev, enable);
5662 static void si_init_cg(struct radeon_device *rdev)
5664 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5669 if (rdev->has_uvd) {
5670 si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
5671 si_init_uvd_internal_cg(rdev);
5675 static void si_fini_cg(struct radeon_device *rdev)
5677 if (rdev->has_uvd) {
5678 si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
5680 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5687 u32 si_get_csb_size(struct radeon_device *rdev)
5693 if (rdev->rlc.cs_data == NULL)
5701 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5719 void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
5725 if (rdev->rlc.cs_data == NULL)
5737 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5753 switch (rdev->family) {
5779 static void si_init_pg(struct radeon_device *rdev)
5781 if (rdev->pg_flags) {
5782 if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
5783 si_init_dma_pg(rdev);
5785 si_init_ao_cu_mask(rdev);
5786 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
5787 si_init_gfx_cgpg(rdev);
5789 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5790 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5792 si_enable_dma_pg(rdev, true);
5793 si_enable_gfx_cgpg(rdev, true);
5795 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5796 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5800 static void si_fini_pg(struct radeon_device *rdev)
5802 if (rdev->pg_flags) {
5803 si_enable_dma_pg(rdev, false);
5804 si_enable_gfx_cgpg(rdev, false);
5811 void si_rlc_reset(struct radeon_device *rdev)
5823 static void si_rlc_stop(struct radeon_device *rdev)
5827 si_enable_gui_idle_interrupt(rdev, false);
5829 si_wait_for_rlc_serdes(rdev);
5832 static void si_rlc_start(struct radeon_device *rdev)
5836 si_enable_gui_idle_interrupt(rdev, true);
5841 static bool si_lbpw_supported(struct radeon_device *rdev)
5852 static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
5864 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5869 static int si_rlc_resume(struct radeon_device *rdev)
5873 if (!rdev->rlc_fw)
5876 si_rlc_stop(rdev);
5878 si_rlc_reset(rdev);
5880 si_init_pg(rdev);
5882 si_init_cg(rdev);
5894 if (rdev->new_fw) {
5896 (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
5899 (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
5909 (const __be32 *)rdev->rlc_fw->data;
5917 si_enable_lbpw(rdev, si_lbpw_supported(rdev));
5919 si_rlc_start(rdev);
5924 static void si_enable_interrupts(struct radeon_device *rdev)
5933 rdev->ih.enabled = true;
5936 static void si_disable_interrupts(struct radeon_device *rdev)
5948 rdev->ih.enabled = false;
5949 rdev->ih.rptr = 0;
5952 static void si_disable_interrupt_state(struct radeon_device *rdev)
5968 for (i = 0; i < rdev->num_crtc; i++)
5970 for (i = 0; i < rdev->num_crtc; i++)
5973 if (!ASIC_IS_NODCE(rdev)) {
5982 static int si_irq_init(struct radeon_device *rdev)
5989 ret = r600_ih_ring_alloc(rdev);
5994 si_disable_interrupts(rdev);
5997 ret = si_rlc_resume(rdev);
5999 r600_ih_ring_fini(rdev);
6005 WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
6015 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
6016 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
6022 if (rdev->wb.enabled)
6026 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
6027 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
6038 if (rdev->msi_enabled)
6043 si_disable_interrupt_state(rdev);
6045 pci_set_master(rdev->pdev);
6048 si_enable_interrupts(rdev);
6054 int si_irq_set(struct radeon_device *rdev)
6063 if (!rdev->irq.installed) {
6068 if (!rdev->ih.enabled) {
6069 si_disable_interrupts(rdev);
6071 si_disable_interrupt_state(rdev);
6085 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
6089 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
6093 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
6097 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
6102 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
6116 if (rdev->irq.dpm_thermal) {
6121 for (i = 0; i < rdev->num_crtc; i++) {
6123 rdev, INT_MASK + crtc_offsets[i], VBLANK_INT_MASK,
6124 rdev->irq.crtc_vblank_int[i] ||
6125 atomic_read(&rdev->irq.pflip[i]), "vblank", i);
6128 for (i = 0; i < rdev->num_crtc; i++)
6131 if (!ASIC_IS_NODCE(rdev)) {
6134 rdev, DC_HPDx_INT_CONTROL(i),
6136 rdev->irq.hpd[i], "HPD", i);
6149 static inline void si_irq_ack(struct radeon_device *rdev)
6152 u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
6153 u32 *grph_int = rdev->irq.stat_regs.evergreen.grph_int;
6155 if (ASIC_IS_NODCE(rdev))
6160 if (i < rdev->num_crtc)
6165 for (i = 0; i < rdev->num_crtc; i += 2) {
6193 static void si_irq_disable(struct radeon_device *rdev)
6195 si_disable_interrupts(rdev);
6198 si_irq_ack(rdev);
6199 si_disable_interrupt_state(rdev);
6202 static void si_irq_suspend(struct radeon_device *rdev)
6204 si_irq_disable(rdev);
6205 si_rlc_stop(rdev);
6208 static void si_irq_fini(struct radeon_device *rdev)
6210 si_irq_suspend(rdev);
6211 r600_ih_ring_fini(rdev);
6214 static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
6218 if (rdev->wb.enabled)
6219 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
6229 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
6230 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
6231 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
6236 return (wptr & rdev->ih.ptr_mask);
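The overflow branch at 6229-6231 is only partially visible; completed, si_get_ih_wptr() pushes rptr 16 bytes (one IH vector) past the reported wptr and acks the overflow bit, with RB_OVERFLOW/IH_WPTR_OVERFLOW_CLEAR assumed per the usual radeon IH layout:

    if (wptr & RB_OVERFLOW) {
            wptr &= ~RB_OVERFLOW;
            dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
                     wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
            rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
            tmp = RREG32(IH_RB_CNTL);
            tmp |= IH_WPTR_OVERFLOW_CLEAR;
            WREG32(IH_RB_CNTL, tmp);
    }
    return (wptr & rdev->ih.ptr_mask);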
6249 int si_irq_process(struct radeon_device *rdev)
6251 u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
6264 if (!rdev->ih.enabled || rdev->shutdown)
6267 wptr = si_get_ih_wptr(rdev);
6271 if (atomic_xchg(&rdev->ih.lock, 1))
6274 rptr = rdev->ih.rptr;
6281 si_irq_ack(rdev);
6286 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
6287 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
6288 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
6303 if (rdev->irq.crtc_vblank_int[crtc_idx]) {
6304 drm_handle_vblank(rdev->ddev, crtc_idx);
6305 rdev->pm.vblank_sync = true;
6306 wake_up(&rdev->irq.vblank_queue);
6308 if (atomic_read(&rdev->irq.pflip[crtc_idx])) {
6309 radeon_crtc_handle_vblank(rdev,
6339 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
6372 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
6382 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
6383 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
6385 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
6387 si_vm_decode_fault(rdev, status, addr);
6390 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
6393 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
6396 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
6402 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
6405 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
6408 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
6414 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
6418 rdev->pm.dpm.thermal.high_to_low = false;
6423 rdev->pm.dpm.thermal.high_to_low = true;
6431 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
6440 rptr &= rdev->ih.ptr_mask;
6444 schedule_work(&rdev->dp_work);
6446 schedule_delayed_work(&rdev->hotplug_work, 0);
6447 if (queue_thermal && rdev->pm.dpm_enabled)
6448 schedule_work(&rdev->pm.dpm.thermal.work);
6449 rdev->ih.rptr = rptr;
6451 atomic_set(&rdev->ih.lock, 0);
6454 wptr = si_get_ih_wptr(rdev);
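The wptr re-read at 6454 closes the race between releasing the IH lock and new events arriving: if the hardware advanced wptr while the loop ran, processing restarts rather than leaving events stranded until the next interrupt. In sketch form:

    /* make sure wptr hasn't changed while processing */
    wptr = si_get_ih_wptr(rdev);
    if (wptr != rptr)
            goto restart_ih;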
6464 static void si_uvd_init(struct radeon_device *rdev)
6468 if (!rdev->has_uvd)
6471 r = radeon_uvd_init(rdev);
6473 dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
6475 * At this point rdev->uvd.vcpu_bo is NULL which trickles down
6480 rdev->has_uvd = false;
6483 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
6484 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
6487 static void si_uvd_start(struct radeon_device *rdev)
6491 if (!rdev->has_uvd)
6494 r = uvd_v2_2_resume(rdev);
6496 dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
6499 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
6501 dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
6507 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
6510 static void si_uvd_resume(struct radeon_device *rdev)
6515 if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
6518 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
6519 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
6521 dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
6524 r = uvd_v1_0_init(rdev);
6526 dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
6531 static void si_vce_init(struct radeon_device *rdev)
6535 if (!rdev->has_vce)
6538 r = radeon_vce_init(rdev);
6540 dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
6542 * At this point rdev->vce.vcpu_bo is NULL which trickles down
6547 rdev->has_vce = false;
6550 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
6551 r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
6552 rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
6553 r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
6556 static void si_vce_start(struct radeon_device *rdev)
6560 if (!rdev->has_vce)
6563 r = radeon_vce_resume(rdev);
6565 dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
6568 r = vce_v1_0_resume(rdev);
6570 dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
6573 r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
6575 dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
6578 r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
6580 dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
6586 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
6587 rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
6590 static void si_vce_resume(struct radeon_device *rdev)
6595 if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
6598 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
6599 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
6601 dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
6604 ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
6605 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
6607 dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r);
6610 r = vce_v1_0_init(rdev);
6612 dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
6617 static int si_startup(struct radeon_device *rdev)
6623 si_pcie_gen3_enable(rdev);
6625 si_program_aspm(rdev);
6628 r = r600_vram_scratch_init(rdev);
6632 si_mc_program(rdev);
6634 if (!rdev->pm.dpm_enabled) {
6635 r = si_mc_load_microcode(rdev);
6642 r = si_pcie_gart_enable(rdev);
6645 si_gpu_init(rdev);
6648 if (rdev->family == CHIP_VERDE) {
6649 rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
6650 rdev->rlc.reg_list_size =
6653 rdev->rlc.cs_data = si_cs_data;
6654 r = sumo_rlc_init(rdev);
6661 r = radeon_wb_init(rdev);
6665 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
6667 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
6671 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
6673 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
6677 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
6679 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
6683 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
6685 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
6689 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
6691 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
6695 si_uvd_start(rdev);
6696 si_vce_start(rdev);
6699 if (!rdev->irq.installed) {
6700 r = radeon_irq_kms_init(rdev);
6705 r = si_irq_init(rdev);
6708 radeon_irq_kms_fini(rdev);
6711 si_irq_set(rdev);
6713 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
6714 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
6719 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
6720 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
6725 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
6726 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
6731 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
6732 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
6737 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
6738 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
6743 r = si_cp_load_microcode(rdev);
6746 r = si_cp_resume(rdev);
6750 r = cayman_dma_resume(rdev);
6754 si_uvd_resume(rdev);
6755 si_vce_resume(rdev);
6757 r = radeon_ib_pool_init(rdev);
6759 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
6763 r = radeon_vm_manager_init(rdev);
6765 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
6769 r = radeon_audio_init(rdev);
6776 int si_resume(struct radeon_device *rdev)
6785 atom_asic_init(rdev->mode_info.atom_context);
6788 si_init_golden_registers(rdev);
6790 if (rdev->pm.pm_method == PM_METHOD_DPM)
6791 radeon_pm_resume(rdev);
6793 rdev->accel_working = true;
6794 r = si_startup(rdev);
6797 rdev->accel_working = false;
6805 int si_suspend(struct radeon_device *rdev)
6807 radeon_pm_suspend(rdev);
6808 radeon_audio_fini(rdev);
6809 radeon_vm_manager_fini(rdev);
6810 si_cp_enable(rdev, false);
6811 cayman_dma_stop(rdev);
6812 if (rdev->has_uvd) {
6813 uvd_v1_0_fini(rdev);
6814 radeon_uvd_suspend(rdev);
6816 if (rdev->has_vce)
6817 radeon_vce_suspend(rdev);
6818 si_fini_pg(rdev);
6819 si_fini_cg(rdev);
6820 si_irq_suspend(rdev);
6821 radeon_wb_disable(rdev);
6822 si_pcie_gart_disable(rdev);
6832 int si_init(struct radeon_device *rdev)
6834 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
6838 if (!radeon_get_bios(rdev)) {
6839 if (ASIC_IS_AVIVO(rdev))
6843 if (!rdev->is_atom_bios) {
6844 dev_err(rdev->dev, "Expecting atombios for SI GPU\n");
6847 r = radeon_atombios_init(rdev);
6852 if (!radeon_card_posted(rdev)) {
6853 if (!rdev->bios) {
6854 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
6858 atom_asic_init(rdev->mode_info.atom_context);
6861 si_init_golden_registers(rdev);
6863 si_scratch_init(rdev);
6865 radeon_surface_init(rdev);
6867 radeon_get_clock_info(rdev->ddev);
6870 r = radeon_fence_driver_init(rdev);
6875 r = si_mc_init(rdev);
6879 r = radeon_bo_init(rdev);
6883 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
6884 !rdev->rlc_fw || !rdev->mc_fw) {
6885 r = si_init_microcode(rdev);
6893 radeon_pm_init(rdev);
6895 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
6897 r600_ring_init(rdev, ring, 1024 * 1024);
6899 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
6901 r600_ring_init(rdev, ring, 1024 * 1024);
6903 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
6905 r600_ring_init(rdev, ring, 1024 * 1024);
6907 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
6909 r600_ring_init(rdev, ring, 64 * 1024);
6911 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
6913 r600_ring_init(rdev, ring, 64 * 1024);
6915 si_uvd_init(rdev);
6916 si_vce_init(rdev);
6918 rdev->ih.ring_obj = NULL;
6919 r600_ih_ring_init(rdev, 64 * 1024);
6921 r = r600_pcie_gart_init(rdev);
6925 rdev->accel_working = true;
6926 r = si_startup(rdev);
6928 dev_err(rdev->dev, "disabling GPU acceleration\n");
6929 si_cp_fini(rdev);
6930 cayman_dma_fini(rdev);
6931 si_irq_fini(rdev);
6932 sumo_rlc_fini(rdev);
6933 radeon_wb_fini(rdev);
6934 radeon_ib_pool_fini(rdev);
6935 radeon_vm_manager_fini(rdev);
6936 radeon_irq_kms_fini(rdev);
6937 si_pcie_gart_fini(rdev);
6938 rdev->accel_working = false;
6945 if (!rdev->mc_fw) {
6953 void si_fini(struct radeon_device *rdev)
6955 radeon_pm_fini(rdev);
6956 si_cp_fini(rdev);
6957 cayman_dma_fini(rdev);
6958 si_fini_pg(rdev);
6959 si_fini_cg(rdev);
6960 si_irq_fini(rdev);
6961 sumo_rlc_fini(rdev);
6962 radeon_wb_fini(rdev);
6963 radeon_vm_manager_fini(rdev);
6964 radeon_ib_pool_fini(rdev);
6965 radeon_irq_kms_fini(rdev);
6966 if (rdev->has_uvd) {
6967 uvd_v1_0_fini(rdev);
6968 radeon_uvd_fini(rdev);
6970 if (rdev->has_vce)
6971 radeon_vce_fini(rdev);
6972 si_pcie_gart_fini(rdev);
6973 r600_vram_scratch_fini(rdev);
6974 radeon_gem_fini(rdev);
6975 radeon_fence_driver_fini(rdev);
6976 radeon_bo_fini(rdev);
6977 radeon_atombios_fini(rdev);
6978 kfree(rdev->bios);
6979 rdev->bios = NULL;
6985 * @rdev: radeon_device pointer
6990 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
6994 mutex_lock(&rdev->gpu_clock_mutex);
6998 mutex_unlock(&rdev->gpu_clock_mutex);
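si_get_gpu_clock_counter() serializes on gpu_clock_mutex because the 64-bit counter must be latched by a register write before its two 32-bit halves are read; reconstructed between the lock/unlock at 6994/6998, with the RLC register names assumed from the SI register map:

    mutex_lock(&rdev->gpu_clock_mutex);
    WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);      /* latch the counter */
    clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
            ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
    mutex_unlock(&rdev->gpu_clock_mutex);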
7002 int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
7020 r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
7040 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
7077 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
7091 static void si_pcie_gen3_enable(struct radeon_device *rdev)
7093 struct pci_dev *root = rdev->pdev->bus->self;
7099 if (pci_is_root_bus(rdev->pdev->bus))
7105 if (rdev->flags & RADEON_IS_IGP)
7108 if (!(rdev->flags & RADEON_IS_PCIE))
7136 if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
7147 pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
7165 pcie_capability_read_word(rdev->pdev,
7173 pcie_capability_read_word(rdev->pdev,
7179 pcie_capability_read_word(rdev->pdev,
7198 pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL,
7215 pcie_capability_read_word(rdev->pdev,
7223 pcie_capability_write_word(rdev->pdev,
7239 pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
7247 pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
7253 for (i = 0; i < rdev->usec_timeout; i++) {
7261 static void si_program_aspm(struct radeon_device *rdev)
7270 if (!(rdev->flags & RADEON_IS_PCIE))
7328 if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
7377 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
7384 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
7390 !pci_is_root_bus(rdev->pdev->bus)) {
7391 struct pci_dev *root = rdev->pdev->bus->self;
7466 static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
7497 int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
7518 r = radeon_uvd_calc_upll_dividers(rdev, evclk, ecclk, 125000, 250000,
7541 r = si_vce_send_vcepll_ctlreq(rdev);
7573 r = si_vce_send_vcepll_ctlreq(rdev);