Lines Matching defs:rdev
(symbol-search hits for rdev in drivers/gpu/drm/radeon/r600_dma.c of the Linux kernel; each hit below is prefixed with its line number in that file)
29 u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);
46 * @rdev: radeon_device pointer
51 uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
56 if (rdev->wb.enabled)
57 rptr = rdev->wb.wb[ring->rptr_offs/4];
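
The hits at 46-57 are the head of r600_dma_get_rptr(). For context, a sketch of the whole accessor as it reads in the radeon driver; the RREG32 fallback and the final mask/shift are not among the hits above and are reproduced from memory of the surrounding source, so check them against your tree:

uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
                           struct radeon_ring *ring)
{
        u32 rptr;

        /* prefer the CPU-coherent write-back copy of the read pointer */
        if (rdev->wb.enabled)
                rptr = rdev->wb.wb[ring->rptr_offs/4];
        else
                rptr = RREG32(DMA_RB_RPTR);

        /* the hardware keeps the pointer in bits 17:2; return a dword index */
        return (rptr & 0x3fffc) >> 2;
}
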
67 * @rdev: radeon_device pointer
72 uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
81 * @rdev: radeon_device pointer
86 void r600_dma_set_wptr(struct radeon_device *rdev,
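
The wptr accessors at 72 and 86 round out the trio: unlike the rptr there is no write-back path, so both sides always touch the DMA_RB_WPTR register. A sketch of the pair, reconstructed to match the rptr accessor's masking:

uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
                           struct radeon_ring *ring)
{
        return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2;
}

void r600_dma_set_wptr(struct radeon_device *rdev,
                       struct radeon_ring *ring)
{
        /* convert the dword index back to a byte offset for the register */
        WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
}
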
95 * @rdev: radeon_device pointer
99 void r600_dma_stop(struct radeon_device *rdev)
103 if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
104 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
109 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
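
The hits at 99-109 span r600_dma_stop(): if TTM was using the DMA engine for buffer moves, shrink the active VRAM window to the CPU-visible part first, then disable the ring. The RB_CNTL read-modify-write between hits 104 and 109 is reconstructed here:

void r600_dma_stop(struct radeon_device *rdev)
{
        u32 rb_cntl = RREG32(DMA_RB_CNTL);

        /* fall back to CPU copies while the DMA ring is down */
        if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
                radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

        rb_cntl &= ~DMA_RB_ENABLE;
        WREG32(DMA_RB_CNTL, rb_cntl);

        rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
}
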
115 * @rdev: radeon_device pointer
120 int r600_dma_resume(struct radeon_device *rdev)
122 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
144 upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
146 ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
148 if (rdev->wb.enabled)
164 if (rdev->family >= CHIP_RV770)
174 r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
180 if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
181 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
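
r600_dma_resume() (120-181) is the densest cluster: program the ring-buffer registers, point the hardware rptr write-back at rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET (144-146), enable the engine, ring-test it (174), and only then hand the full VRAM size back to TTM (180-181). An abbreviated sketch; the elided register setup and the exact ordering should be checked against the source:

int r600_dma_resume(struct radeon_device *rdev)
{
        struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
        u32 rb_cntl, rb_bufsz;
        int r;

        /* ring size is encoded as log2 dwords in RB_CNTL */
        rb_bufsz = order_base_2(ring->ring_size / 4);
        rb_cntl = rb_bufsz << 1;

        /* hardware mirrors its read pointer into the write-back slot */
        WREG32(DMA_RB_RPTR_ADDR_HI,
               upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
        WREG32(DMA_RB_RPTR_ADDR_LO,
               ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));

        if (rdev->wb.enabled)
                rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

        /* ... ring base address, wptr reset and IB control setup elided ... */

        if (rdev->family >= CHIP_RV770)
                WREG32(DMA_MODE, 1);

        WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
        ring->ready = true;

        r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
        if (r) {
                ring->ready = false;
                return r;
        }

        /* DMA is usable again: let TTM move buffers through all of VRAM */
        if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
                radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

        return 0;
}
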
189 * @rdev: radeon_device pointer
193 void r600_dma_fini(struct radeon_device *rdev)
195 r600_dma_stop(rdev);
196 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
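
Teardown at 193-196 is nearly the whole story; the complete function is just:

void r600_dma_fini(struct radeon_device *rdev)
{
        r600_dma_stop(rdev);
        radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
}
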
202 * @rdev: radeon_device pointer
208 bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
210 u32 reset_mask = r600_gpu_check_soft_reset(rdev);
213 radeon_ring_lockup_update(rdev, ring);
216 return radeon_ring_test_lockup(rdev, ring);
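
r600_dma_is_lockup() (208-216) is where the r600_gpu_check_soft_reset() declaration from line 29 gets used: if the soft-reset mask does not implicate the DMA block, refresh the lockup tracker and report the ring healthy; otherwise fall through to the generic lockup test. The mask test between hits 210 and 213 is reconstructed:

bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 reset_mask = r600_gpu_check_soft_reset(rdev);

        if (!(reset_mask & RADEON_RESET_DMA)) {
                /* engine is alive: remember that we saw it making progress */
                radeon_ring_lockup_update(rdev, ring);
                return false;
        }
        return radeon_ring_test_lockup(rdev, ring);
}
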
223 * @rdev: radeon_device pointer
230 int r600_dma_ring_test(struct radeon_device *rdev,
244 gpu_addr = rdev->wb.gpu_addr + index;
247 rdev->wb.wb[index/4] = cpu_to_le32(tmp);
249 r = radeon_ring_lock(rdev, ring, 4);
258 radeon_ring_unlock_commit(rdev, ring, false);
260 for (i = 0; i < rdev->usec_timeout; i++) {
261 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
267 if (i < rdev->usec_timeout) {
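
The ring test (230-267) seeds a write-back slot with a poison word, asks the engine to overwrite it via a single write packet, and polls for up to rdev->usec_timeout microseconds. A sketch with the packet emission and poll loop filled in; the 0xCAFEDEAD/0xDEADBEEF values and the 4-dword packet layout are reproduced from memory of the driver:

int r600_dma_ring_test(struct radeon_device *rdev,
                       struct radeon_ring *ring)
{
        unsigned i, index;
        u64 gpu_addr;
        u32 tmp;
        int r;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                index = R600_WB_DMA_RING_TEST_OFFSET;
        else
                index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

        gpu_addr = rdev->wb.gpu_addr + index;

        tmp = 0xCAFEDEAD;
        rdev->wb.wb[index/4] = cpu_to_le32(tmp);

        r = radeon_ring_lock(rdev, ring, 4);
        if (r)
                return r;

        /* one write packet: opcode, addr lo, addr hi, payload */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
        radeon_ring_write(ring, lower_32_bits(gpu_addr) & 0xfffffffc);
        radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
        radeon_ring_write(ring, 0xDEADBEEF);
        radeon_ring_unlock_commit(rdev, ring, false);

        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = le32_to_cpu(rdev->wb.wb[index/4]);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        return (i < rdev->usec_timeout) ? 0 : -EINVAL;
}
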
280 * @rdev: radeon_device pointer
287 void r600_dma_fence_ring_emit(struct radeon_device *rdev,
290 struct radeon_ring *ring = &rdev->ring[fence->ring];
291 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
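
Fence emission (287-291) is a fixed five-dword sequence: write the fence's sequence number to the per-ring fence address, then raise a trap so sleeping waiters get an interrupt. The payload dwords after the two hits are reconstructed:

void r600_dma_fence_ring_emit(struct radeon_device *rdev,
                              struct radeon_fence *fence)
{
        struct radeon_ring *ring = &rdev->ring[fence->ring];
        u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

        /* write the fence sequence number */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
        radeon_ring_write(ring, addr & 0xfffffffc);
        radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
        radeon_ring_write(ring, lower_32_bits(fence->seq));
        /* generate an interrupt for anyone waiting on the fence */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
}
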
305 * @rdev: radeon_device pointer
313 bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
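
Only the prototype of the semaphore emitter shows at 313. The body is three dwords; in this reconstruction the s field of the packet selects signal (1) versus wait (0), which is how the driver distinguishes the two directions:

bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
                                  struct radeon_ring *ring,
                                  struct radeon_semaphore *semaphore,
                                  bool emit_wait)
{
        u64 addr = semaphore->gpu_addr;
        u32 s = emit_wait ? 0 : 1;

        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
        radeon_ring_write(ring, addr & 0xfffffffc);
        radeon_ring_write(ring, upper_32_bits(addr) & 0xff);

        return true;
}
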
331 * @rdev: radeon_device pointer
337 int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
351 gpu_addr = rdev->wb.gpu_addr + index;
353 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
365 r = radeon_ib_schedule(rdev, &ib, NULL, false);
367 radeon_ib_free(rdev, &ib);
381 for (i = 0; i < rdev->usec_timeout; i++) {
382 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
387 if (i < rdev->usec_timeout) {
393 radeon_ib_free(rdev, &ib);
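
The IB test (337-393) repeats the ring test's poison-then-overwrite trick but routes the write packet through an indirect buffer, so the IB fetch path gets exercised too: get an IB (353), fill it, schedule it (365), then poll the write-back slot (381-387) and free the IB (393). An abbreviated sketch; the fence wait between scheduling and polling is elided:

int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
        struct radeon_ib ib;
        unsigned i, index = R600_WB_DMA_RING_TEST_OFFSET;
        u64 gpu_addr = rdev->wb.gpu_addr + index;
        u32 tmp = 0;
        int r;

        rdev->wb.wb[index/4] = cpu_to_le32(0xCAFEDEAD);

        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r)
                return r;

        /* same 4-dword write packet as the ring test, but inside an IB */
        ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
        ib.ptr[1] = lower_32_bits(gpu_addr) & 0xfffffffc;
        ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
        ib.ptr[3] = 0xDEADBEEF;
        ib.length_dw = 4;

        r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                radeon_ib_free(rdev, &ib);
                return r;
        }

        /* ... wait on ib.fence (timeout handling elided) ... */

        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = le32_to_cpu(rdev->wb.wb[index/4]);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        r = (i < rdev->usec_timeout) ? 0 : -EINVAL;
        radeon_ib_free(rdev, &ib);
        return r;
}
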
400 * @rdev: radeon_device pointer
405 void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
407 struct radeon_ring *ring = &rdev->ring[ib->ring];
409 if (rdev->wb.enabled) {
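
r600_dma_ring_ib_execute() (405-409) chains the next IB into the ring. With write-back enabled it first emits a write packet that publishes the predicted rptr, then pads with NOPs so the indirect-buffer packet ends on an 8-dword boundary, a hardware requirement on this DMA engine. A sketch; the alignment arithmetic is reproduced from memory:

void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];

        if (rdev->wb.enabled) {
                u32 next_rptr = ring->wptr + 4;
                while ((next_rptr & 7) != 5)
                        next_rptr++;
                next_rptr += 3;
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
                radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
                radeon_ring_write(ring, next_rptr);
        }

        /* the IB packet must end on an 8-dword boundary; pad with NOPs */
        while ((ring->wptr & 7) != 5)
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
        radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
        radeon_ring_write(ring, (ib->length_dw << 16) |
                                (upper_32_bits(ib->gpu_addr) & 0xFF));
}
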
434 * @rdev: radeon_device pointer
444 struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
451 int ring_index = rdev->asic->copy.dma_ring_index;
452 struct radeon_ring *ring = &rdev->ring[ring_index];
461 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
464 radeon_sync_free(rdev, &sync, NULL);
468 radeon_sync_resv(rdev, &sync, resv, false);
469 radeon_sync_rings(rdev, &sync, ring->idx);
485 r = radeon_fence_emit(rdev, &fence, ring->idx);
487 radeon_ring_unlock_undo(rdev, ring);
488 radeon_sync_free(rdev, &sync, NULL);
492 radeon_ring_unlock_commit(rdev, ring, false);
493 radeon_sync_free(rdev, &sync, fence);
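
r600_copy_dma() (444-493) is the buffer-move callback behind rdev->asic->copy: it converts the page count to dwords, splits the transfer into chunks of at most 0xFFFE dwords (hence the num_loops * 4 + 8 ring-space reservation at 461), syncs against the buffer's reservation object (468-469), emits one copy packet per chunk, and fences the whole batch (485), with the error paths at 487-488 undoing the ring lock and dropping the sync object. An abbreviated sketch; note that older trees spell the last parameter struct reservation_object rather than struct dma_resv:

struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
                                   uint64_t src_offset, uint64_t dst_offset,
                                   unsigned num_gpu_pages,
                                   struct dma_resv *resv)
{
        struct radeon_fence *fence;
        struct radeon_sync sync;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_dw, cur_size_in_dw;
        int i, num_loops;
        int r;

        radeon_sync_create(&sync);

        size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
        num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
        r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
        if (r) {
                radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }

        radeon_sync_resv(rdev, &sync, resv, false);
        radeon_sync_rings(rdev, &sync, ring->idx);

        for (i = 0; i < num_loops; i++) {
                cur_size_in_dw = min(size_in_dw, 0xFFFEu);
                size_in_dw -= cur_size_in_dw;
                /* copy packet: count, dst lo, src lo, dst/src hi bytes */
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
                radeon_ring_write(ring, dst_offset & 0xfffffffc);
                radeon_ring_write(ring, src_offset & 0xfffffffc);
                radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
                                         (upper_32_bits(src_offset) & 0xff)));
                src_offset += cur_size_in_dw * 4;
                dst_offset += cur_size_in_dw * 4;
        }

        r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }

        radeon_ring_unlock_commit(rdev, ring, false);
        radeon_sync_free(rdev, &sync, fence);

        return fence;
}
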