Lines Matching defs:ring
36 * to the 3D engine (ring buffer, IBs, etc.), but the
49 * @ring: radeon ring pointer
54 struct radeon_ring *ring)
59 rptr = rdev->wb.wb[ring->rptr_offs/4];
61 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
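For context, a minimal sketch of the read-pointer path these matches come from (the writeback check and the register fallback are reconstructed from the surrounding driver conventions, not quoted from the listing):

	u32 rptr;

	if (rdev->wb.enabled) {
		/* GPU writes the rptr into the writeback buffer; read it from there */
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	} else {
		/* otherwise read DMA_RB_RPTR from the matching engine's register block */
		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
			rptr = RREG32(DMA_RB_RPTR + DMA0_REGISTER_OFFSET);
		else
			rptr = RREG32(DMA_RB_RPTR + DMA1_REGISTER_OFFSET);
	}

	return (rptr & 0x3fffc) >> 2;	/* hardware keeps a byte offset; return dwords */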
76 * @ring: radeon ring pointer
81 struct radeon_ring *ring)
85 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
97 * @ring: radeon ring pointer
102 struct radeon_ring *ring)
106 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
111 WREG32(reg, (ring->wptr << 2) & 0x3fffc);
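The write-pointer accessors matched above differ only in which register block they touch; a hedged sketch, assuming the usual dword-to-byte conversion:

	/* pick the register block for the engine this ring belongs to */
	u32 reg = (ring->idx == R600_RING_TYPE_DMA_INDEX) ?
		  (DMA_RB_WPTR + DMA0_REGISTER_OFFSET) :
		  (DMA_RB_WPTR + DMA1_REGISTER_OFFSET);

	/* get_wptr: the register holds a byte offset, the driver tracks dwords */
	wptr = (RREG32(reg) & 0x3fffc) >> 2;

	/* set_wptr: convert back to a byte offset, masked to the ring aperture */
	WREG32(reg, (ring->wptr << 2) & 0x3fffc);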
120 * Schedule an IB in the DMA ring (cayman-SI).
125 struct radeon_ring *ring = &rdev->ring[ib->ring];
126 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
129 u32 next_rptr = ring->wptr + 4;
133 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
134 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
135 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
136 radeon_ring_write(ring, next_rptr);
139 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
142 while ((ring->wptr & 7) != 5)
143 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
144 radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
145 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
146 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
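Two details of the IB submission above are easier to follow with the packet sizes written out: the optional rptr-write packet is 4 dwords and the INDIRECT_BUFFER packet is 3 dwords, so padding with NOPs until (wptr & 7) == 5 makes the IB packet end exactly on an 8-dword boundary, and next_rptr is advanced the same way so it names the rptr value after the IB packet has been consumed. A hedged restatement of that arithmetic:

	/* Sketch of the next_rptr derivation (mirrors the alignment rule above):
	 * +4 skips the 4-dword rptr-write packet itself, the loop advances to
	 * the (x & 7) == 5 slot where the IB packet will start, and +3 covers
	 * the 3-dword INDIRECT_BUFFER packet, i.e. 5 + 3 lands on a multiple
	 * of 8, which is where the rptr sits once the IB packet is consumed. */
	u32 next_rptr = ring->wptr + 4;
	while ((next_rptr & 7) != 5)
		next_rptr++;
	next_rptr += 3;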
175 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
176 rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
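Only the final "mark not ready" assignments match the query here; in the stop path the ring buffers are presumably disabled first by clearing the enable bit in DMA_RB_CNTL for both engines, roughly (a sketch, register names assumed from the resume code):

	u32 rb_cntl;

	/* disable DMA0, then DMA1, before flagging the rings as not ready */
	rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl & ~DMA_RB_ENABLE);

	rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl & ~DMA_RB_ENABLE);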
184 * Set up the DMA ring buffers and enable them (cayman-SI).
189 struct radeon_ring *ring;
197 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
201 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
209 /* Set ring buffer size in dwords */
210 rb_bufsz = order_base_2(ring->ring_size / 4);
217 /* Initialize the ring buffer's read and write pointers */
230 WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
243 ring->wptr = 0;
244 WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
248 ring->ready = true;
250 r = radeon_ring_test(rdev, ring->idx, ring);
252 ring->ready = false;
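Stitched together, the matched resume lines give the per-engine bring-up order: size the ring, clear the pointers, program the base, set the write pointer, enable the ring buffer, then run a ring test. A hedged sketch of one iteration (control-bit layout such as the position of the size field is assumed, not taken from the listing):

	rb_bufsz = order_base_2(ring->ring_size / 4);		/* ring size in dwords, log2 */
	WREG32(DMA_RB_CNTL + reg_offset, rb_bufsz << 1);	/* assumed: size field at bit 1 */

	/* Initialize the ring buffer's read and write pointers */
	WREG32(DMA_RB_RPTR + reg_offset, 0);
	WREG32(DMA_RB_WPTR + reg_offset, 0);

	WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);	/* base is 256-byte aligned */

	ring->wptr = 0;
	WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);

	WREG32(DMA_RB_CNTL + reg_offset,
	       RREG32(DMA_RB_CNTL + reg_offset) | DMA_RB_ENABLE);

	ring->ready = true;
	r = radeon_ring_test(rdev, ring->idx, ring);
	if (r) {
		ring->ready = false;
		return r;
	}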
274 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
275 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
282 * @ring: radeon_ring structure holding ring information
287 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
292 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
298 radeon_ring_lockup_update(rdev, ring);
301 return radeon_ring_test_lockup(rdev, ring);
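The lockup check differs between the two DMA rings only in which soft-reset bit it examines. A sketch of the whole helper, assuming the usual soft-reset status helper on this ASIC:

	/* Declare the ring hung only if the soft-reset status actually flags
	 * this DMA engine; otherwise refresh the lockup tracker and move on. */
	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
	u32 mask = (ring->idx == R600_RING_TYPE_DMA_INDEX) ?
		   RADEON_RESET_DMA : RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);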
449 void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
452 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
453 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
454 radeon_ring_write(ring, pd_addr >> 12);
457 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
458 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
459 radeon_ring_write(ring, 1);
462 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
463 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
464 radeon_ring_write(ring, 1 << vm_id);
467 radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
468 radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2));
469 radeon_ring_write(ring, 0); /* mask */
470 radeon_ring_write(ring, 0); /* value */
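Read as a sequence, the flush above is three SRBM register writes (page table base in 4KB units, HDP flush, VM invalidate) followed by one wait. A restatement with a hypothetical helper, for illustration only (cayman_dma_srbm_write is made up here, not a driver function):

/* Hypothetical helper: emit one DMA SRBM register write as done three times
 * above (0xf << 16 = all byte enables, registers addressed in dwords). */
static void cayman_dma_srbm_write(struct radeon_ring *ring, u32 reg, u32 val)
{
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (reg >> 2));
	radeon_ring_write(ring, val);
}

/* The flush then reads as:
 *   cayman_dma_srbm_write(ring, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), pd_addr >> 12);
 *   cayman_dma_srbm_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL, 1);
 *   cayman_dma_srbm_write(ring, VM_INVALIDATE_REQUEST, 1 << vm_id);
 * followed by the SRBM read poll (mask 0, value 0), which simply waits
 * until the invalidate write is visible before the ring continues. */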