Lines Matching refs:ring

53  * are no longer in use by the associated ring on the GPU and
64 * @ring: ring index the fence is associated with
68 static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
70 struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
84 * @ring: ring index the fence is associated with
89 static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
91 struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
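The write/read pair above only differs in direction; both go through the per-ring radeon_fence_driver bookkeeping. Below is a minimal sketch of how such a pair typically branches between the writeback page and a scratch register; the rdev->wb.enabled flag and the WREG32()/RREG32() register accessors are assumptions based on common radeon conventions, not lines matched above.

/* Sketch only: rdev->wb.enabled and WREG32()/RREG32() are assumptions. */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
        struct radeon_fence_driver *drv = &rdev->fence_drv[ring];

        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
                if (drv->cpu_addr)
                        *drv->cpu_addr = cpu_to_le32(seq);
        } else {
                WREG32(drv->scratch_reg, seq);
        }
}

static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
        struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
        u32 seq = 0;

        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
                if (drv->cpu_addr)
                        seq = le32_to_cpu(*drv->cpu_addr);
                else
                        seq = lower_32_bits(atomic64_read(&drv->last_seq));
        } else {
                seq = RREG32(drv->scratch_reg);
        }
        return seq;
}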
110 * @ring: ring index we should work with
114 static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
121 &rdev->fence_drv[ring].lockup_work,
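Line 121 is the first argument of a schedule_delayed_work() call; a sketch of the whole helper follows, with the timeout constant name being an assumption.

static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
        /* Re-arm the per-ring lockup watchdog; the delay constant below
         * is an assumed name, any sane jiffies timeout works here. */
        schedule_delayed_work(&rdev->fence_drv[ring].lockup_work,
                              RADEON_FENCE_JIFFIES_TIMEOUT);
}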
126 * radeon_fence_emit - emit a fence on the requested ring
130 * @ring: ring index the fence is associated with
132 * Emits a fence command on the requested ring (all asics).
137 int ring)
141 /* we are protected by the ring emission mutex */
147 (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
148 (*fence)->ring = ring;
152 rdev->fence_context + ring,
154 radeon_fence_ring_emit(rdev, ring, *fence);
155 trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
156 radeon_fence_schedule_check(rdev, ring);
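Stitched together, the emit path allocates the fence, bumps the per-ring sync_seq, initializes the embedded dma_fence, emits the fence command and arms the lockup check. A hedged reconstruction follows; the radeon_fence_ops name and the use of the fence_queue lock for dma_fence_init() are assumptions, and some field initialization is trimmed.

int radeon_fence_emit(struct radeon_device *rdev,
                      struct radeon_fence **fence,
                      int ring)
{
        u64 seq;

        /* we are protected by the ring emission mutex */
        *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
        if ((*fence) == NULL)
                return -ENOMEM;

        (*fence)->rdev = rdev;
        (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
        (*fence)->ring = ring;
        dma_fence_init(&(*fence)->base, &radeon_fence_ops,   /* ops name assumed */
                       &rdev->fence_queue.lock,               /* lock choice assumed */
                       rdev->fence_context + ring,
                       seq);
        radeon_fence_ring_emit(rdev, ring, *fence);
        trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
        radeon_fence_schedule_check(rdev, ring);
        return 0;
}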
178 seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
187 radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
199 * @ring: ring index the fence is associated with
203 * on the ring, and the fence_queue should be woken up.
205 static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
232 last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
234 last_emitted = rdev->fence_drv[ring].sync_seq[ring];
235 seq = radeon_fence_read(rdev, ring);
259 } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
262 radeon_fence_schedule_check(rdev, ring);
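The loop ending at line 259 widens the 32-bit value returned by radeon_fence_read() into the 64-bit last_seq space and retries if another CPU raced the atomic64_xchg(). A sketch follows, with the loop-count safety guard of the full driver trimmed.

static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
        uint64_t seq, last_seq, last_emitted;
        bool wake = false;

        last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
        do {
                last_emitted = rdev->fence_drv[ring].sync_seq[ring];
                seq = radeon_fence_read(rdev, ring);
                /* extend the 32-bit hardware counter to 64 bits,
                 * handling wrap-around against last_emitted */
                seq |= last_seq & 0xffffffff00000000LL;
                if (seq < last_seq) {
                        seq &= 0xffffffff;
                        seq |= last_emitted & 0xffffffff00000000LL;
                }
                if (seq <= last_seq || seq > last_emitted)
                        break;
                /* new activity was observed, remember it */
                wake = true;
                last_seq = seq;
                /* keep only the largest value any CPU observed */
        } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

        if (seq < last_emitted)
                radeon_fence_schedule_check(rdev, ring);

        return wake;
}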
279 int ring;
284 ring = fence_drv - &rdev->fence_drv[0];
288 radeon_fence_schedule_check(rdev, ring);
301 if (radeon_fence_activity(rdev, ring))
304 else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
308 "0x%016llx last fence id 0x%016llx on ring %d)\n",
310 fence_drv->sync_seq[ring], ring);
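The fragments at 279-310 belong to the delayed-work handler behind lockup_work: it recovers the ring index from the radeon_fence_driver pointer, re-checks for activity and otherwise reports a lockup. A sketch follows; the handler name, the dev_warn() wording before line 308, and the locking and GPU-reset handling of the full driver are assumptions or trimmed.

static void radeon_fence_check_lockup(struct work_struct *work)
{
        struct radeon_fence_driver *fence_drv;
        struct radeon_device *rdev;
        int ring;

        fence_drv = container_of(work, struct radeon_fence_driver,
                                 lockup_work.work);
        rdev = fence_drv->rdev;
        ring = fence_drv - &rdev->fence_drv[0];   /* pointer arithmetic -> ring index */

        if (radeon_fence_activity(rdev, ring))
                wake_up_all(&rdev->fence_queue);
        else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                /* we believe the ring is locked up */
                dev_warn(rdev->dev, "GPU lockup (current fence id "
                         "0x%016llx last fence id 0x%016llx on ring %d)\n",
                         (uint64_t)atomic64_read(&fence_drv->last_seq),
                         fence_drv->sync_seq[ring], ring);
                /* the full driver also requests a GPU reset here */
        }
}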
323 * @ring: ring index the fence is associated with
328 void radeon_fence_process(struct radeon_device *rdev, int ring)
330 if (radeon_fence_activity(rdev, ring))
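Line 330 plus a wake-up is the whole function; the wake_up_all() call on rdev->fence_queue is inferred from the comment at line 203 rather than matched directly.

void radeon_fence_process(struct radeon_device *rdev, int ring)
{
        if (radeon_fence_activity(rdev, ring))
                wake_up_all(&rdev->fence_queue);
}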
339 * @ring: ring index the fence is associated with
349 u64 seq, unsigned ring)
351 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
355 radeon_fence_process(rdev, ring);
356 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
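A short sketch of the check-then-poll-then-recheck pattern the fragments at 349-356 outline:

static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
                                      u64 seq, unsigned ring)
{
        if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
                return true;

        /* poll the hardware sequence number at least once */
        radeon_fence_process(rdev, ring);
        if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
                return true;

        return false;
}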
366 unsigned ring = fence->ring;
369 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
374 radeon_fence_process(rdev, ring);
377 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
397 if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
401 radeon_irq_kms_sw_irq_get(rdev, fence->ring);
403 if (radeon_fence_activity(rdev, fence->ring))
407 if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
408 radeon_irq_kms_sw_irq_put(rdev, fence->ring);
416 if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
417 rdev->fence_drv[fence->ring].delayed_irq = true;
418 radeon_fence_schedule_check(rdev, fence->ring);
427 DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
444 if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
485 * Wait for the requested sequence number(s) to be written by any ring
486 * (all asics). Sequence number array is indexed by ring id.
565 seq[fence->ring] = fence->seq;
599 * radeon_fence_wait_any - wait for a fence to signal on any ring
606 * array is indexed by ring id. @intr selects whether to use
645 * @ring: ring index the fence is associated with
647 * Wait for the next fence on the requested ring to signal (all asics).
649 * Caller must hold ring lock.
651 int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
656 seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
657 if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
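The blocking helper that consumes the per-ring seq[] array (described at lines 485-486) is not among the matches; it is referred to below as radeon_fence_wait_seq_timeout(), which is an assumed name and signature. With that caveat, a sketch of waiting for the next fence:

int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
        uint64_t seq[RADEON_NUM_RINGS] = {};
        long r;

        seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
        if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
                /* nothing to wait for: last_seq already covers the
                 * last emitted fence on this ring */
                return -ENOENT;
        }
        r = radeon_fence_wait_seq_timeout(rdev, seq, false,   /* assumed helper */
                                          MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;
        return 0;
}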
672 * @ring: ring index the fence is associated with
674 * Wait for all fences on the requested ring to signal (all asics).
676 * Caller must hold ring lock.
678 int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
683 seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
684 if (!seq[ring])
692 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
693 ring, r);
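Draining a ring works the same way, but waits on the last emitted sequence number; the same assumption about the blocking helper applies, and the -EDEADLK propagation is likewise an assumption.

int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
        uint64_t seq[RADEON_NUM_RINGS] = {};
        long r;

        seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
        if (!seq[ring])
                return 0;   /* nothing was ever emitted on this ring */

        r = radeon_fence_wait_seq_timeout(rdev, seq, false,   /* assumed helper */
                                          MAX_SCHEDULE_TIMEOUT);
        if (r < 0) {
                if (r == -EDEADLK)
                        return -EDEADLK;
                dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
                        ring, r);
        }
        return 0;
}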
733 * @ring: ring index the fence is associated with
735 * Get the number of fences emitted on the requested ring (all asics).
736 * Returns the number of emitted fences on the ring. Used by the
737 * dynpm code to track ring activity.
739 unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
743 /* We are not protected by ring lock when reading the last sequence
746 radeon_fence_process(rdev, ring);
747 emitted = rdev->fence_drv[ring].sync_seq[ring]
748 - atomic64_read(&rdev->fence_drv[ring].last_seq);
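The count is simply emitted minus signaled; a sketch follows, with the defensive clamp value being an assumption.

unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
        uint64_t emitted;

        /* We are not protected by ring lock when reading the last sequence,
         * but it's ok to report a slightly wrong fence count here. */
        radeon_fence_process(rdev, ring);
        emitted = rdev->fence_drv[ring].sync_seq[ring]
                - atomic64_read(&rdev->fence_drv[ring].last_seq);
        /* clamp to avoid 32-bit issues in callers (value assumed) */
        if (emitted > 0x10000000)
                emitted = 0x10000000;

        return (unsigned)emitted;
}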
760 * @dst_ring: which ring to check against
762 * Check if the fence needs to be synced against another ring
764 * Returns true if we need to sync with another ring, false if
775 if (fence->ring == dst_ring) {
779 /* we are protected by the ring mutex */
781 if (fence->seq <= fdrv->sync_seq[fence->ring]) {
792 * @dst_ring: which ring to check against
795 * be synced with the requested ring (all asics).
806 if (fence->ring == dst_ring) {
810 /* we are protected by the ring mutex */
811 src = &fence->rdev->fence_drv[fence->ring];
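The fragments at 760-811 describe a need-sync/note-sync pair: the first asks whether dst_ring has already been synced up to this fence, the second records the sync by merging the source ring's sync_seq[] into the destination's. The function names and the max()-merge loop below are inferred, not matched.

bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
        struct radeon_fence_driver *fdrv;

        if (!fence)
                return false;

        if (fence->ring == dst_ring)
                return false;

        /* we are protected by the ring mutex */
        fdrv = &fence->rdev->fence_drv[dst_ring];
        if (fence->seq <= fdrv->sync_seq[fence->ring])
                return false;

        return true;
}

void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
        struct radeon_fence_driver *dst, *src;
        unsigned i;

        if (!fence)
                return;

        if (fence->ring == dst_ring)
                return;

        /* we are protected by the ring mutex */
        src = &fence->rdev->fence_drv[fence->ring];
        dst = &fence->rdev->fence_drv[dst_ring];
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (i == dst_ring)
                        continue;
                dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
        }
}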
823 * ready for use on the requested ring.
826 * @ring: ring index to start the fence driver on
833 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
838 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
839 if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
840 rdev->fence_drv[ring].scratch_reg = 0;
841 if (ring != R600_RING_TYPE_UVD_INDEX) {
842 index = R600_WB_EVENT_OFFSET + ring * 4;
843 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
844 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
850 rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
851 rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
855 r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
861 rdev->fence_drv[ring].scratch_reg -
863 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
864 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
866 radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
867 rdev->fence_drv[ring].initialized = true;
868 dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx\n",
869 ring, rdev->fence_drv[ring].gpu_addr);
875 * for the requested ring.
878 * @ring: ring index to start the fence driver on
880 * Init the fence driver for the requested ring (all asics).
883 static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
887 rdev->fence_drv[ring].scratch_reg = -1;
888 rdev->fence_drv[ring].cpu_addr = NULL;
889 rdev->fence_drv[ring].gpu_addr = 0;
891 rdev->fence_drv[ring].sync_seq[i] = 0;
892 atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
893 rdev->fence_drv[ring].initialized = false;
894 INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
896 rdev->fence_drv[ring].rdev = rdev;
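Joined up, per-ring initialization just zeroes the bookkeeping and wires up the delayed work; the loop over RADEON_NUM_RINGS and the callback name are not among the matched lines and are assumptions (the name matches the lockup-handler sketch above).

static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
        int i;

        rdev->fence_drv[ring].scratch_reg = -1;
        rdev->fence_drv[ring].cpu_addr = NULL;
        rdev->fence_drv[ring].gpu_addr = 0;
        for (i = 0; i < RADEON_NUM_RINGS; ++i)
                rdev->fence_drv[ring].sync_seq[i] = 0;
        atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
        rdev->fence_drv[ring].initialized = false;
        INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
                          radeon_fence_check_lockup);   /* name assumed, see sketch above */
        rdev->fence_drv[ring].rdev = rdev;
}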
913 int ring;
916 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
917 radeon_fence_driver_init_ring(rdev, ring);
935 int ring, r;
938 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
939 if (!rdev->fence_drv[ring].initialized)
941 r = radeon_fence_wait_empty(rdev, ring);
944 radeon_fence_driver_force_completion(rdev, ring);
946 cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
948 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
949 rdev->fence_drv[ring].initialized = false;
958 * @ring: the ring to complete
963 void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
965 if (rdev->fence_drv[ring].initialized) {
966 radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
967 cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
989 seq_printf(m, "--- ring %d ---\n", i);
997 seq_printf(m, "Last sync to ring %d 0x%016llx\n",
1047 switch (fence->ring) {