Lines Matching defs:ring
35 * to the 3D engine (ring buffer, IBs, etc.), but the
48 * @ring: radeon ring pointer
53 struct radeon_ring *ring)
58 rptr = rdev->wb.wb[ring->rptr_offs/4];
60 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
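For context, a sketch of the read-pointer accessor these fragments come from, reconstructed from the matched lines and the usual radeon writeback pattern (not verbatim from the file):

/* Sketch (reconstruction, not verbatim): read the ring's read pointer
 * either from the writeback page or from the per-engine MMIO register,
 * then convert the byte offset back into a dword index. */
uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
			     struct radeon_ring *ring)
{
	u32 rptr, reg;

	if (rdev->wb.enabled) {
		/* writeback buffer holds the rptr the engine last published */
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	} else {
		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
			reg = DMA_RB_RPTR + DMA0_REGISTER_OFFSET;
		else
			reg = DMA_RB_RPTR + DMA1_REGISTER_OFFSET;
		rptr = RREG32(reg);
	}

	return (rptr & 0x3fffc) >> 2;
}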
75 * @ring: radeon ring pointer
80 struct radeon_ring *ring)
84 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
96 * @ring: radeon ring pointer
101 struct radeon_ring *ring)
105 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
110 WREG32(reg, (ring->wptr << 2) & 0x3fffc);
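The write-pointer accessors follow the same DMA0/DMA1 selection. A sketch of the set_wptr side, reconstructed around the matched line 110 (not verbatim): the ring code tracks wptr in dwords, while the register takes a byte offset wrapped to the ring aperture, hence the << 2 and the 0x3fffc mask.

/* Sketch (reconstruction, not verbatim): publish the new write pointer
 * to whichever DMA engine owns this ring. */
void cayman_dma_set_wptr(struct radeon_device *rdev,
			 struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
	else
		reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;

	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
}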
119 * Schedule an IB in the DMA ring (cayman-SI).
124 struct radeon_ring *ring = &rdev->ring[ib->ring];
125 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
128 u32 next_rptr = ring->wptr + 4;
132 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
133 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
134 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
135 radeon_ring_write(ring, next_rptr);
138 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
141 while ((ring->wptr & 7) != 5)
142 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
143 radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
144 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
145 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
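The 5 in the padding condition at lines 141-142 comes from the packet size: the INDIRECT_BUFFER packet emitted at lines 143-145 is three dwords long, and 5 + 3 = 8, so spinning until (wptr & 7) == 5 guarantees the packet ends exactly on the 8 DW boundary the comment at line 138 requires. A minimal illustration of that padding, written as a hypothetical helper (not present in the file):

/* Illustration only (hypothetical helper): pad the DMA ring with NOPs so
 * that a 3-dword packet written immediately afterwards ends on an
 * 8-dword boundary. */
static void cayman_dma_pad_for_ib(struct radeon_ring *ring)
{
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
}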
174 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
175 rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
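Lines 174-175 are the tail of the DMA stop path. A rough sketch of the surrounding function, assuming the usual pattern of clearing the ring-buffer enable bit before marking the rings unusable (reconstruction, not verbatim):

/* Sketch (reconstruction, not verbatim): halt both async DMA engines by
 * clearing the ring-buffer enable bit, then mark the rings not ready so
 * no further work is submitted to them. */
void cayman_dma_stop(struct radeon_device *rdev)
{
	u32 rb_cntl;

	/* dma0 */
	rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);

	/* dma1 */
	rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}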
183 * Set up the DMA ring buffers and enable them. (cayman-SI).
188 struct radeon_ring *ring;
196 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
200 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
208 /* Set ring buffer size in dwords */
209 rb_bufsz = order_base_2(ring->ring_size / 4);
216 /* Initialize the ring buffer's read and write pointers */
229 WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
242 ring->wptr = 0;
243 WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
247 ring->ready = true;
249 r = radeon_ring_test(rdev, ring->idx, ring);
251 ring->ready = false;
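A condensed sketch of the per-engine bring-up these lines belong to, reconstructed around the matched fragments (reg_offset selects DMA0 or DMA1; the loop over both engines and several housekeeping register writes are omitted, so this is not verbatim):

/* Sketch (reconstruction, not verbatim): program the ring size, reset the
 * hardware pointers, set the ring base, enable the ring, then verify it
 * with a ring test before declaring it usable. */
rb_bufsz = order_base_2(ring->ring_size / 4);	/* size field is log2(dwords) */
rb_cntl = rb_bufsz << 1;

/* reset the hardware read and write pointers */
WREG32(DMA_RB_RPTR + reg_offset, 0);
WREG32(DMA_RB_WPTR + reg_offset, 0);

/* ring base is a 256-byte aligned GPU address, programmed >> 8 */
WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);

/* start from the beginning of the ring and enable it */
ring->wptr = 0;
WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);

ring->ready = true;

r = radeon_ring_test(rdev, ring->idx, ring);
if (r) {
	ring->ready = false;
	return r;
}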
273 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
274 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
281 * @ring: radeon_ring structure holding ring information
286 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
291 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
297 radeon_ring_lockup_update(rdev, ring);
300 return radeon_ring_test_lockup(rdev, ring);
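A sketch of how those pieces fit together, reconstructed from the matched lines and the common radeon lockup pattern (not verbatim from the file):

/* Sketch (reconstruction, not verbatim): if the soft-reset status does not
 * flag the engine that owns this ring, the engine is idle or making
 * progress, so refresh the lockup tracker and report no lockup; otherwise
 * fall through to the generic rptr-based lockup test. */
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}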
448 void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
451 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
452 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
453 radeon_ring_write(ring, pd_addr >> 12);
456 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
457 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
458 radeon_ring_write(ring, 1);
461 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
462 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
463 radeon_ring_write(ring, 1 << vm_id);
466 radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
467 radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2));
468 radeon_ring_write(ring, 0); /* mask */
469 radeon_ring_write(ring, 0); /* value */
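Each register write in this flush has the same three-dword shape: a SRBM_WRITE packet header, then (0xf << 16) combined with the dword-aligned register offset, then the value. As an illustration only, a hypothetical helper (not in the file) that would emit one such write:

/* Illustration only (hypothetical helper): emit one SRBM register write
 * on the DMA ring, matching the dword layout used at lines 451-463. */
static void cayman_dma_srbm_write(struct radeon_ring *ring, u32 reg, u32 val)
{
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (reg >> 2));
	radeon_ring_write(ring, val);
}

For example, cayman_dma_srbm_write(ring, VM_INVALIDATE_REQUEST, 1 << vm_id) would produce the same dwords as lines 461-463.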