Lines matching defs:rdev in drivers/gpu/drm/radeon/r600_dma.c (radeon DRM driver)

45  * @rdev: radeon_device pointer
50 uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
55 if (rdev->wb.enabled)
56 rptr = rdev->wb.wb[ring->rptr_offs/4];
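
For context, a sketch of the full function these matches come from, reconstructed from the upstream radeon driver; the RREG32(DMA_RB_RPTR) fallback and the 0x3fffc mask are taken from the r600d.h register definitions and are assumptions about this exact tree:

    uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
                               struct radeon_ring *ring)
    {
            u32 rptr;

            /* prefer the CPU-visible write-back copy of the read pointer */
            if (rdev->wb.enabled)
                    rptr = rdev->wb.wb[ring->rptr_offs/4];
            else
                    rptr = RREG32(DMA_RB_RPTR);

            /* register holds a byte offset; convert to a dword index */
            return (rptr & 0x3fffc) >> 2;
    }
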
66 * @rdev: radeon_device pointer
71 uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
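
The write pointer has no write-back copy, so a sketch of the body (again assuming the upstream DMA_RB_WPTR register and mask) is a single register read:

    uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
                               struct radeon_ring *ring)
    {
            /* byte offset in the register, dword index to the caller */
            return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2;
    }
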
80 * @rdev: radeon_device pointer
85 void r600_dma_set_wptr(struct radeon_device *rdev,
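
The setter is the mirror image; a minimal sketch, assuming the same register layout:

    void r600_dma_set_wptr(struct radeon_device *rdev,
                           struct radeon_ring *ring)
    {
            /* dword index back to a byte offset, masked to the ring size field */
            WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
    }
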
94 * @rdev: radeon_device pointer
98 void r600_dma_stop(struct radeon_device *rdev)
102 if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
103 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
108 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
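
A sketch of r600_dma_stop() around these matches, reconstructed from upstream; the DMA_RB_CNTL/DMA_RB_ENABLE names are assumptions from r600d.h:

    void r600_dma_stop(struct radeon_device *rdev)
    {
            u32 rb_cntl = RREG32(DMA_RB_CNTL);

            /* while the async DMA ring is down, restrict buffer moves to
             * the CPU-visible VRAM aperture */
            if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
                    radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

            rb_cntl &= ~DMA_RB_ENABLE;
            WREG32(DMA_RB_CNTL, rb_cntl);

            rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
    }
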
114 * @rdev: radeon_device pointer
119 int r600_dma_resume(struct radeon_device *rdev)
121 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
143 upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
145 ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
147 if (rdev->wb.enabled)
163 if (rdev->family >= CHIP_RV770)
173 r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
179 if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
180 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
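
The matched lines above are scattered through the ring bring-up path. A condensed sketch of r600_dma_resume(), reconstructed from upstream (the big-endian swap-enable ifdefs are omitted here, and the register and bit names are assumptions from r600d.h):

    int r600_dma_resume(struct radeon_device *rdev)
    {
            struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
            u32 rb_cntl, dma_cntl, ib_cntl;
            u32 rb_bufsz;
            int r;

            WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
            WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);

            /* ring buffer size in dwords, encoded as log2 */
            rb_bufsz = order_base_2(ring->ring_size / 4);
            rb_cntl = rb_bufsz << 1;
            WREG32(DMA_RB_CNTL, rb_cntl);

            /* reset read and write pointers */
            WREG32(DMA_RB_RPTR, 0);
            WREG32(DMA_RB_WPTR, 0);

            /* rptr write-back address, programmed whether or not wb is enabled */
            WREG32(DMA_RB_RPTR_ADDR_HI,
                   upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
            WREG32(DMA_RB_RPTR_ADDR_LO,
                   ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));

            if (rdev->wb.enabled)
                    rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

            WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);

            /* enable DMA IBs */
            ib_cntl = DMA_IB_ENABLE;
            WREG32(DMA_IB_CNTL, ib_cntl);

            dma_cntl = RREG32(DMA_CNTL);
            dma_cntl &= ~CTXEMPTY_INT_ENABLE;
            WREG32(DMA_CNTL, dma_cntl);

            if (rdev->family >= CHIP_RV770)
                    WREG32(DMA_MODE, 1);

            ring->wptr = 0;
            WREG32(DMA_RB_WPTR, ring->wptr << 2);

            WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);

            ring->ready = true;

            r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
            if (r) {
                    ring->ready = false;
                    return r;
            }

            /* DMA ring is back: expose full VRAM for buffer moves again */
            if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
                    radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

            return 0;
    }
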
188 * @rdev: radeon_device pointer
192 void r600_dma_fini(struct radeon_device *rdev)
194 r600_dma_stop(rdev);
195 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
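
Teardown is just the two matched calls; a sketch of the whole function:

    void r600_dma_fini(struct radeon_device *rdev)
    {
            /* halt the engine, then release the ring buffer */
            r600_dma_stop(rdev);
            radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
    }
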
201 * @rdev: radeon_device pointer
207 bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
209 u32 reset_mask = r600_gpu_check_soft_reset(rdev);
212 radeon_ring_lockup_update(rdev, ring);
215 return radeon_ring_test_lockup(rdev, ring);
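
A sketch of the lockup check, reconstructed from upstream; the RADEON_RESET_DMA mask test between the two matched calls is the assumption here:

    bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
    {
            u32 reset_mask = r600_gpu_check_soft_reset(rdev);

            if (!(reset_mask & RADEON_RESET_DMA)) {
                    /* engine looks alive; refresh the lockup tracker */
                    radeon_ring_lockup_update(rdev, ring);
                    return false;
            }
            return radeon_ring_test_lockup(rdev, ring);
    }
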
222 * @rdev: radeon_device pointer
229 int r600_dma_ring_test(struct radeon_device *rdev,
243 gpu_addr = rdev->wb.gpu_addr + index;
246 rdev->wb.wb[index/4] = cpu_to_le32(tmp);
248 r = radeon_ring_lock(rdev, ring, 4);
257 radeon_ring_unlock_commit(rdev, ring, false);
259 for (i = 0; i < rdev->usec_timeout; i++) {
260 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
266 if (i < rdev->usec_timeout) {
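
A sketch of the full ring test, reconstructed from upstream: seed a write-back scratch slot with 0xCAFEDEAD, submit one DMA write packet that stores 0xDEADBEEF there, and poll until it lands. The DMA_PACKET() encoding and the scratch offsets are assumptions from the upstream headers:

    int r600_dma_ring_test(struct radeon_device *rdev,
                           struct radeon_ring *ring)
    {
            unsigned i, index;
            u32 tmp;
            u64 gpu_addr;
            int r;

            if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                    index = R600_WB_DMA_RING_TEST_OFFSET;
            else
                    index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

            gpu_addr = rdev->wb.gpu_addr + index;

            /* seed the scratch location with a known "stale" value */
            tmp = 0xCAFEDEAD;
            rdev->wb.wb[index/4] = cpu_to_le32(tmp);

            r = radeon_ring_lock(rdev, ring, 4);
            if (r) {
                    DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
                    return r;
            }
            /* one DMA write packet: store 0xDEADBEEF at gpu_addr */
            radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
            radeon_ring_write(ring, lower_32_bits(gpu_addr) & 0xfffffffc);
            radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
            radeon_ring_write(ring, 0xDEADBEEF);
            radeon_ring_unlock_commit(rdev, ring, false);

            /* poll until the engine has landed the write or we time out */
            for (i = 0; i < rdev->usec_timeout; i++) {
                    tmp = le32_to_cpu(rdev->wb.wb[index/4]);
                    if (tmp == 0xDEADBEEF)
                            break;
                    udelay(1);
            }

            if (i < rdev->usec_timeout) {
                    DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
            } else {
                    DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
                              ring->idx, tmp);
                    r = -EINVAL;
            }
            return r;
    }
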
279 * @rdev: radeon_device pointer
286 void r600_dma_fence_ring_emit(struct radeon_device *rdev,
289 struct radeon_ring *ring = &rdev->ring[fence->ring];
290 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
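
A sketch of the fence emit, reconstructed from upstream; the FENCE and TRAP packet types are assumptions from the r600 DMA packet definitions:

    void r600_dma_fence_ring_emit(struct radeon_device *rdev,
                                  struct radeon_fence *fence)
    {
            struct radeon_ring *ring = &rdev->ring[fence->ring];
            u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

            /* write the fence sequence number to the fence address */
            radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
            radeon_ring_write(ring, addr & 0xfffffffc);
            radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
            radeon_ring_write(ring, lower_32_bits(fence->seq));
            /* generate an interrupt so waiters get woken */
            radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
    }
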
304 * @rdev: radeon_device pointer
312 bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
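
A sketch of the semaphore emit, reconstructed from upstream; the s-bit selecting signal vs. wait inside DMA_PACKET() is the assumption:

    bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
                                      struct radeon_ring *ring,
                                      struct radeon_semaphore *semaphore,
                                      bool emit_wait)
    {
            u64 addr = semaphore->gpu_addr;
            /* s = 1 signals the semaphore, s = 0 waits on it */
            u32 s = emit_wait ? 0 : 1;

            radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
            radeon_ring_write(ring, addr & 0xfffffffc);
            radeon_ring_write(ring, upper_32_bits(addr) & 0xff);

            return true;
    }
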
330 * @rdev: radeon_device pointer
336 int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
350 gpu_addr = rdev->wb.gpu_addr + index;
352 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
364 r = radeon_ib_schedule(rdev, &ib, NULL, false);
366 radeon_ib_free(rdev, &ib);
380 for (i = 0; i < rdev->usec_timeout; i++) {
381 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
386 if (i < rdev->usec_timeout) {
392 radeon_ib_free(rdev, &ib);
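
The IB test mirrors the ring test but issues the write from an indirect buffer. A sketch reconstructed from upstream; note that newer kernels use radeon_fence_wait_timeout() here, while this sketch assumes the simpler radeon_fence_wait():

    int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
    {
            struct radeon_ib ib;
            unsigned i, index;
            u32 tmp = 0;
            u64 gpu_addr;
            int r;

            if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                    index = R600_WB_DMA_RING_TEST_OFFSET;
            else
                    index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

            gpu_addr = rdev->wb.gpu_addr + index;
            rdev->wb.wb[index/4] = cpu_to_le32(0xCAFEDEAD);

            r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
            if (r) {
                    DRM_ERROR("radeon: failed to get ib (%d).\n", r);
                    return r;
            }

            /* same write packet as the ring test, but issued from an IB */
            ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
            ib.ptr[1] = lower_32_bits(gpu_addr) & 0xfffffffc;
            ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
            ib.ptr[3] = 0xDEADBEEF;
            ib.length_dw = 4;

            r = radeon_ib_schedule(rdev, &ib, NULL, false);
            if (r) {
                    radeon_ib_free(rdev, &ib);
                    DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
                    return r;
            }
            /* wait for the IB's fence, then poll the write-back slot */
            r = radeon_fence_wait(ib.fence, false);
            if (r) {
                    radeon_ib_free(rdev, &ib);
                    DRM_ERROR("radeon: fence wait failed (%d).\n", r);
                    return r;
            }
            for (i = 0; i < rdev->usec_timeout; i++) {
                    tmp = le32_to_cpu(rdev->wb.wb[index/4]);
                    if (tmp == 0xDEADBEEF)
                            break;
                    udelay(1);
            }
            if (i < rdev->usec_timeout) {
                    DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ring->idx, i);
            } else {
                    DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
                    r = -EINVAL;
            }
            radeon_ib_free(rdev, &ib);
            return r;
    }
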
399 * @rdev: radeon_device pointer
404 void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
406 struct radeon_ring *ring = &rdev->ring[ib->ring];
408 if (rdev->wb.enabled) {
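
A sketch of the IB dispatch, reconstructed from upstream; the alignment constant (the INDIRECT_BUFFER packet must end on an 8-dword boundary, hence the "& 7 != 5" padding) and the packet encodings are assumptions from the upstream code:

    void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
    {
            struct radeon_ring *ring = &rdev->ring[ib->ring];

            if (rdev->wb.enabled) {
                    /* predict where rptr will land after this submission and
                     * write it out so the CPU copy stays current */
                    u32 next_rptr = ring->wptr + 4;
                    while ((next_rptr & 7) != 5)
                            next_rptr++;
                    next_rptr += 3;
                    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
                    radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
                    radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
                    radeon_ring_write(ring, next_rptr);
            }

            /* pad with NOPs so the IB packet ends on an 8-dword boundary */
            while ((ring->wptr & 7) != 5)
                    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
            radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
            radeon_ring_write(ring, ib->gpu_addr & 0xFFFFFFE0);
            radeon_ring_write(ring, ib->length_dw | (upper_32_bits(ib->gpu_addr) & 0xFF));
    }
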
433 * @rdev: radeon_device pointer
443 struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
450 int ring_index = rdev->asic->copy.dma_ring_index;
451 struct radeon_ring *ring = &rdev->ring[ring_index];
460 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
463 radeon_sync_free(rdev, &sync, NULL);
467 radeon_sync_resv(rdev, &sync, resv, false);
468 radeon_sync_rings(rdev, &sync, ring->idx);
484 r = radeon_fence_emit(rdev, &fence, ring->idx);
486 radeon_ring_unlock_undo(rdev, ring);
487 radeon_sync_free(rdev, &sync, NULL);
491 radeon_ring_unlock_commit(rdev, ring, false);
492 radeon_sync_free(rdev, &sync, fence);
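
Finally, a sketch of the DMA copy path, reconstructed from upstream: the copy is chunked because one COPY packet moves at most 0xFFFE dwords, which is why the ring is locked for num_loops * 4 + 8 dwords. The resv parameter type (struct dma_resv vs. the older struct reservation_object) and the COPY packet field layout are assumptions that vary by kernel version:

    struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
                                       uint64_t src_offset, uint64_t dst_offset,
                                       unsigned num_gpu_pages,
                                       struct dma_resv *resv)
    {
            struct radeon_fence *fence;
            struct radeon_sync sync;
            int ring_index = rdev->asic->copy.dma_ring_index;
            struct radeon_ring *ring = &rdev->ring[ring_index];
            u32 size_in_dw, cur_size_in_dw;
            int i, num_loops;
            int r = 0;

            radeon_sync_create(&sync);

            size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
            num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
            r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
            if (r) {
                    DRM_ERROR("radeon: moving bo (%d).\n", r);
                    radeon_sync_free(rdev, &sync, NULL);
                    return ERR_PTR(r);
            }

            /* order this copy behind everyone holding the reservation */
            radeon_sync_resv(rdev, &sync, resv, false);
            radeon_sync_rings(rdev, &sync, ring->idx);

            for (i = 0; i < num_loops; i++) {
                    cur_size_in_dw = size_in_dw;
                    if (cur_size_in_dw > 0xFFFE)
                            cur_size_in_dw = 0xFFFE;
                    size_in_dw -= cur_size_in_dw;
                    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
                    radeon_ring_write(ring, dst_offset & 0xfffffffc);
                    radeon_ring_write(ring, src_offset & 0xfffffffc);
                    radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
                                             (upper_32_bits(src_offset) & 0xff)));
                    src_offset += cur_size_in_dw * 4;
                    dst_offset += cur_size_in_dw * 4;
            }

            r = radeon_fence_emit(rdev, &fence, ring->idx);
            if (r) {
                    radeon_ring_unlock_undo(rdev, ring);
                    radeon_sync_free(rdev, &sync, NULL);
                    return ERR_PTR(r);
            }

            radeon_ring_unlock_commit(rdev, ring, false);
            radeon_sync_free(rdev, &sync, fence);

            return fence;
    }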