Lines Matching defs:rdev (drivers/gpu/drm/radeon/radeon_fence.c, Linux radeon DRM driver)

62  * @rdev: radeon_device pointer
68 static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
70 struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
71 if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
83 * @rdev: radeon_device pointer
89 static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
91 struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
94 if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
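
radeon_fence_write() and radeon_fence_read() above store and load a ring's last fence value either through a CPU-visible writeback slot or, as a fallback, through a scratch register. Below is a minimal user-space sketch of that selection pattern; the struct and helper names are invented for illustration, and the scratch register is modelled as a plain variable rather than an MMIO access.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the two backing stores a fence value can live in. */
    struct fence_slot {
        uint32_t *cpu_addr;     /* writeback memory slot (preferred)        */
        int       wb_enabled;   /* mirrors rdev->wb.enabled                 */
        int       has_scratch;  /* mirrors drv->scratch_reg being allocated */
        uint32_t  scratch_val;  /* models the scratch register contents     */
    };

    static void fence_write(struct fence_slot *s, uint32_t seq)
    {
        if (s->wb_enabled || !s->has_scratch)
            *s->cpu_addr = seq;       /* plain CPU store into the writeback page */
        else
            s->scratch_val = seq;     /* stands in for a register write          */
    }

    static uint32_t fence_read(const struct fence_slot *s)
    {
        if (s->wb_enabled || !s->has_scratch)
            return *s->cpu_addr;
        return s->scratch_val;        /* stands in for a register read           */
    }

    int main(void)
    {
        uint32_t slot = 0;
        struct fence_slot s = { .cpu_addr = &slot, .wb_enabled = 1 };
        fence_write(&s, 42);
        printf("last fence value: %u\n", (unsigned)fence_read(&s));
        return 0;
    }
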
109 * @rdev: radeon_device pointer
114 static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
121 &rdev->fence_drv[ring].lockup_work,
128 * @rdev: radeon_device pointer
135 int radeon_fence_emit(struct radeon_device *rdev,
146 (*fence)->rdev = rdev;
147 (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
151 &rdev->fence_queue.lock,
152 rdev->fence_context + ring,
154 radeon_fence_ring_emit(rdev, ring, *fence);
155 trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
156 radeon_fence_schedule_check(rdev, ring);
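
radeon_fence_emit() reserves the next value of the ring's 64-bit sync_seq counter for the new fence and only then asks the ring to emit the command that will eventually write that value back. A compressed user-space sketch of that ordering follows; the struct names and ring_emit() are placeholders, not driver API.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct toy_fence {
        uint64_t seq;        /* sequence number this fence waits for */
        int      ring;
    };

    struct toy_ring {
        uint64_t sync_seq;   /* last sequence number handed out on this ring */
    };

    /* Placeholder for the hardware-specific command that writes 'seq' back
     * to the fence location once all earlier work on the ring has finished. */
    static void ring_emit(int ring, uint64_t seq)
    {
        printf("ring %d: emit fence command for seq %llu\n",
               ring, (unsigned long long)seq);
    }

    static struct toy_fence *fence_emit(struct toy_ring *r, int ring)
    {
        struct toy_fence *f = malloc(sizeof(*f));
        if (!f)
            return NULL;
        f->ring = ring;
        f->seq  = ++r->sync_seq;    /* reserve the number first ...        */
        ring_emit(ring, f->seq);    /* ... then queue its write-back       */
        return f;
    }

    int main(void)
    {
        struct toy_ring gfx = { .sync_seq = 0 };
        struct toy_fence *f = fence_emit(&gfx, 0);
        free(f);
        return 0;
    }
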
178 seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
187 radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
188 __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
198 * @rdev: radeon_device pointer
205 static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
232 last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
234 last_emitted = rdev->fence_drv[ring].sync_seq[ring];
235 seq = radeon_fence_read(rdev, ring);
259 } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
262 radeon_fence_schedule_check(rdev, ring);
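
The subtle part of radeon_fence_activity() is that the hardware reports only a 32-bit value while the driver tracks 64-bit sequence numbers; the upper half is reconstructed from the last seen sequence, or from the last emitted one when the 32-bit counter has wrapped. A standalone model of that reconstruction, with made-up example values:

    #include <stdint.h>
    #include <stdio.h>

    /* Extend a 32-bit value read back from the hardware into the 64-bit
     * sequence space, assuming the true value lies between the last seen
     * sequence and the last emitted one. This mirrors the masking done in
     * radeon_fence_activity but is a standalone model, not the driver code. */
    static uint64_t extend_seq(uint32_t hw_seq, uint64_t last_seq,
                               uint64_t last_emitted)
    {
        uint64_t seq = (uint64_t)hw_seq | (last_seq & 0xffffffff00000000ULL);

        if (seq < last_seq) {
            /* The 32-bit counter wrapped; borrow the upper half from the
             * last emitted sequence instead. */
            seq &= 0xffffffffULL;
            seq |= last_emitted & 0xffffffff00000000ULL;
        }
        return seq;
    }

    int main(void)
    {
        /* Wrap case: hardware shows 0x00000002 while we last saw
         * 0x1fffffffe and have emitted up to 0x200000005. */
        printf("0x%llx\n", (unsigned long long)
               extend_seq(0x2, 0x1fffffffeULL, 0x200000005ULL));
        return 0;
    }
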
278 struct radeon_device *rdev;
283 rdev = fence_drv->rdev;
284 ring = fence_drv - &rdev->fence_drv[0];
286 if (!down_read_trylock(&rdev->exclusive_lock)) {
288 radeon_fence_schedule_check(rdev, ring);
292 if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
296 spin_lock_irqsave(&rdev->irq.lock, irqflags);
297 radeon_irq_set(rdev);
298 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
301 if (radeon_fence_activity(rdev, ring))
302 wake_up_all(&rdev->fence_queue);
304 else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
307 dev_warn(rdev->dev, "GPU lockup (current fence id "
313 rdev->needs_reset = true;
314 wake_up_all(&rdev->fence_queue);
316 up_read(&rdev->exclusive_lock);
322 * @rdev: radeon_device pointer
328 void radeon_fence_process(struct radeon_device *rdev, int ring)
330 if (radeon_fence_activity(rdev, ring))
331 wake_up_all(&rdev->fence_queue);
337 * @rdev: radeon device pointer
348 static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
351 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
355 radeon_fence_process(rdev, ring);
356 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
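
radeon_fence_seq_signaled() first compares against the cached last_seq and, only if that fails, polls the hardware (via radeon_fence_process()) and compares again. A tiny model of that check/poll/re-check pattern; poll_hw() and the atomic variable are stand-ins, not driver code.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static _Atomic uint64_t last_seq;   /* updated whenever the hardware is polled */

    /* Stand-in for re-reading the fence value from the hardware. */
    static void poll_hw(void)
    {
        /* a real implementation would refresh last_seq here */
    }

    static bool seq_signaled(uint64_t seq)
    {
        if (atomic_load(&last_seq) >= seq)
            return true;                 /* fast path: cached value is enough   */
        poll_hw();                       /* otherwise refresh and try once more */
        return atomic_load(&last_seq) >= seq;
    }

    int main(void)
    {
        atomic_store(&last_seq, 10);
        return seq_signaled(9) ? 0 : 1;  /* 9 <= 10, so already signaled */
    }
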
365 struct radeon_device *rdev = fence->rdev;
369 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
373 if (down_read_trylock(&rdev->exclusive_lock)) {
374 radeon_fence_process(rdev, ring);
375 up_read(&rdev->exclusive_lock);
377 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
395 struct radeon_device *rdev = fence->rdev;
397 if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
400 if (down_read_trylock(&rdev->exclusive_lock)) {
401 radeon_irq_kms_sw_irq_get(rdev, fence->ring);
403 if (radeon_fence_activity(rdev, fence->ring))
404 wake_up_all_locked(&rdev->fence_queue);
407 if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
408 radeon_irq_kms_sw_irq_put(rdev, fence->ring);
409 up_read(&rdev->exclusive_lock);
413 up_read(&rdev->exclusive_lock);
416 if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
417 rdev->fence_drv[fence->ring].delayed_irq = true;
418 radeon_fence_schedule_check(rdev, fence->ring);
424 __add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
444 if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
458 * @rdev: radeon device pointer
466 static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
471 if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
480 * @rdev: radeon device pointer
494 static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
501 if (radeon_fence_any_seq_signaled(rdev, target_seq))
509 trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
510 radeon_irq_kms_sw_irq_get(rdev, i);
514 r = wait_event_interruptible_timeout(rdev->fence_queue, (
515 radeon_fence_any_seq_signaled(rdev, target_seq)
516 || rdev->needs_reset), timeout);
518 r = wait_event_timeout(rdev->fence_queue, (
519 radeon_fence_any_seq_signaled(rdev, target_seq)
520 || rdev->needs_reset), timeout);
523 if (rdev->needs_reset)
530 radeon_irq_kms_sw_irq_put(rdev, i);
531 trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
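
radeon_fence_wait_seq_timeout() sleeps on the shared fence_queue until one of the target sequence numbers is reached or a GPU reset is requested, bounded by a timeout. A rough user-space equivalent using a condition variable; struct waiter, its field names and the one-second example deadline are all invented for illustration.

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    struct waiter {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        uint64_t        last_seq;     /* advanced by whoever polls the hardware */
        bool            needs_reset;  /* set when a lockup/reset is detected    */
    };

    /* Returns true if the target was reached (or a reset was requested),
     * false if the deadline expired first. */
    static bool wait_seq_timeout(struct waiter *w, uint64_t target, unsigned secs)
    {
        struct timespec deadline;
        bool done;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += secs;

        pthread_mutex_lock(&w->lock);
        while (w->last_seq < target && !w->needs_reset) {
            if (pthread_cond_timedwait(&w->cond, &w->lock, &deadline) == ETIMEDOUT)
                break;
        }
        done = (w->last_seq >= target) || w->needs_reset;
        pthread_mutex_unlock(&w->lock);
        return done;
    }

    int main(void)
    {
        struct waiter w = {
            .lock     = PTHREAD_MUTEX_INITIALIZER,
            .cond     = PTHREAD_COND_INITIALIZER,
            .last_seq = 5,               /* already past the target below */
        };
        return wait_seq_timeout(&w, 3, 1) ? 0 : 1;
    }
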
566 r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
601 * @rdev: radeon device pointer
611 int radeon_fence_wait_any(struct radeon_device *rdev,
634 r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
644 * @rdev: radeon device pointer
651 int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
656 seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
657 if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
662 r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
671 * @rdev: radeon device pointer
678 int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
683 seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
687 r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
692 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
732 * @rdev: radeon device pointer
739 unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
746 radeon_fence_process(rdev, ring);
747 emitted = rdev->fence_drv[ring].sync_seq[ring]
748 - atomic64_read(&rdev->fence_drv[ring].last_seq);
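
radeon_fence_count_emitted() reports how many fences are still outstanding on a ring as the difference between the last handed-out sequence and the last retired one. A toy version of that arithmetic, with a clamp in the same spirit as the driver's guard against implausibly large deltas:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned count_emitted(uint64_t sync_seq, uint64_t last_seq)
    {
        uint64_t emitted = sync_seq - last_seq;   /* handed out minus retired */

        if (emitted > 0x10000000ULL)              /* guard against bogus deltas */
            emitted = 0x10000000ULL;
        return (unsigned)emitted;
    }

    int main(void)
    {
        printf("%u fences still outstanding\n", count_emitted(120, 117));
        return 0;
    }
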
780 fdrv = &fence->rdev->fence_drv[dst_ring];
811 src = &fence->rdev->fence_drv[fence->ring];
812 dst = &fence->rdev->fence_drv[dst_ring];
825 * @rdev: radeon device pointer
833 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
838 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
839 if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
840 rdev->fence_drv[ring].scratch_reg = 0;
843 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
844 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
849 index = ALIGN(rdev->uvd_fw->size, 8);
850 rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
851 rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
855 r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
857 dev_err(rdev->dev, "fence failed to get scratch register\n");
861 rdev->fence_drv[ring].scratch_reg -
862 rdev->scratch.reg_base;
863 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
864 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
866 radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
867 rdev->fence_drv[ring].initialized = true;
868 dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx\n",
869 ring, rdev->fence_drv[ring].gpu_addr);
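
radeon_fence_driver_start_ring() ends up with two views of the same fence slot: a CPU pointer into the writeback page and the matching GPU address, both derived from a byte offset into that page. A toy calculation of that pairing; the page size, offset and GPU base address here are made up.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t wb_page[1024];              /* CPU mapping of the page  */
        uint64_t wb_gpu_addr = 0x100000;     /* GPU address of the page  */
        unsigned index = 16;                 /* byte offset of this slot */

        uint32_t *cpu_addr = &wb_page[index / 4];    /* u32 array indexing */
        uint64_t  gpu_addr = wb_gpu_addr + index;    /* same slot, GPU view */

        *cpu_addr = 0;                       /* seed with last known seq */
        printf("slot: cpu %p gpu 0x%llx\n",
               (void *)cpu_addr, (unsigned long long)gpu_addr);
        return 0;
    }
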
877 * @rdev: radeon device pointer
883 static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
887 rdev->fence_drv[ring].scratch_reg = -1;
888 rdev->fence_drv[ring].cpu_addr = NULL;
889 rdev->fence_drv[ring].gpu_addr = 0;
891 rdev->fence_drv[ring].sync_seq[i] = 0;
892 atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
893 rdev->fence_drv[ring].initialized = false;
894 INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
896 rdev->fence_drv[ring].rdev = rdev;
903 * @rdev: radeon device pointer
911 int radeon_fence_driver_init(struct radeon_device *rdev)
915 init_waitqueue_head(&rdev->fence_queue);
917 radeon_fence_driver_init_ring(rdev, ring);
919 if (radeon_debugfs_fence_init(rdev)) {
920 dev_err(rdev->dev, "fence debugfs file creation failed\n");
929 * @rdev: radeon device pointer
933 void radeon_fence_driver_fini(struct radeon_device *rdev)
937 mutex_lock(&rdev->ring_lock);
939 if (!rdev->fence_drv[ring].initialized)
941 r = radeon_fence_wait_empty(rdev, ring);
944 radeon_fence_driver_force_completion(rdev, ring);
946 cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
947 wake_up_all(&rdev->fence_queue);
948 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
949 rdev->fence_drv[ring].initialized = false;
951 mutex_unlock(&rdev->ring_lock);
957 * @rdev: radeon device pointer
963 void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
965 if (rdev->fence_drv[ring].initialized) {
966 radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
967 cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
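
radeon_fence_driver_force_completion() writes the ring's highest handed-out sequence number straight into the fence location, so every outstanding fence immediately compares as signaled (useful on teardown or after a failed reset). A one-function model of that idea; fence_mem stands in for the writeback slot.

    #include <stdint.h>

    static void force_completion(uint32_t *fence_mem, uint64_t sync_seq)
    {
        *fence_mem = (uint32_t)sync_seq;   /* waiters now see seq >= their target */
    }

    int main(void)
    {
        uint32_t fence_mem = 0;
        force_completion(&fence_mem, 57);
        return fence_mem == 57 ? 0 : 1;
    }
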
980 struct radeon_device *rdev = dev->dev_private;
984 if (!rdev->fence_drv[i].initialized)
987 radeon_fence_process(rdev, i);
991 (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
993 rdev->fence_drv[i].sync_seq[i]);
996 if (i != j && rdev->fence_drv[j].initialized)
998 j, rdev->fence_drv[i].sync_seq[j]);
1013 struct radeon_device *rdev = dev->dev_private;
1015 down_read(&rdev->exclusive_lock);
1016 seq_printf(m, "%d\n", rdev->needs_reset);
1017 rdev->needs_reset = true;
1018 wake_up_all(&rdev->fence_queue);
1019 up_read(&rdev->exclusive_lock);
1030 int radeon_debugfs_fence_init(struct radeon_device *rdev)
1033 return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
1083 struct radeon_device *rdev = fence->rdev;
1104 if (rdev->needs_reset) {