// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define JOB_TIMEOUT_MS 500

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))

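/*
 * Queue life-cycle states, tracked with atomic ops so the IRQ handler,
 * the timeout handler and the reset worker can coordinate without
 * always holding the queue lock.
 */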
enum panfrost_queue_status {
	PANFROST_QUEUE_STATUS_ACTIVE,
	PANFROST_QUEUE_STATUS_STOPPED,
	PANFROST_QUEUE_STATUS_STARTING,
	PANFROST_QUEUE_STATUS_FAULT_PENDING,
};

struct panfrost_queue_state {
	struct drm_gpu_scheduler sched;
	atomic_t status;
	struct mutex lock;
	u64 fence_context;
	u64 emit_seqno;
};

struct panfrost_job_slot {
	struct panfrost_queue_state queue[NUM_JOB_SLOTS];
	spinlock_t job_lock;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct panfrost_job, base);
}

struct panfrost_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* panfrost seqno for signaled() test */
	u64 seqno;
	int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
	return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
	return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
	struct panfrost_fence *f = to_panfrost_fence(fence);

	switch (f->queue) {
	case 0:
		return "panfrost-js-0";
	case 1:
		return "panfrost-js-1";
	case 2:
		return "panfrost-js-2";
	default:
		return NULL;
	}
}

static const struct dma_fence_ops panfrost_fence_ops = {
	.get_driver_name = panfrost_fence_get_driver_name,
	.get_timeline_name = panfrost_fence_get_timeline_name,
};

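/*
 * Each job slot has its own fence context; fences are created with a
 * monotonically increasing seqno, serialized by the slot's job_lock.
 */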
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
	struct panfrost_fence *fence;
	struct panfrost_job_slot *js = pfdev->js;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->dev = pfdev->ddev;
	fence->queue = js_num;
	fence->seqno = ++js->queue[js_num].emit_seqno;
	dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
		       js->queue[js_num].fence_context, fence->seqno);

	return &fence->base;
}

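/*
 * Map a job's requirement flags to a hardware job slot: fragment jobs
 * go to JS0, everything else to JS1. Routing compute jobs to JS2 is
 * stubbed out until the compute flags are exposed to userspace.
 */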
static int panfrost_job_get_slot(struct panfrost_job *job)
{
	/* JS0: fragment jobs.
	 * JS1: vertex/tiler jobs.
	 * JS2: compute jobs.
	 */
	if (job->requirements & PANFROST_JD_REQ_FS)
		return 0;

/* Not exposed to userspace yet */
#if 0
	if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
		if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
		    (job->pfdev->features.nr_core_groups == 2))
			return 2;
		if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
			return 2;
	}
#endif
	return 1;
}

static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
					u32 requirements,
					int js)
{
	u64 affinity;

	/*
	 * Use all cores for now.
	 * Eventually we may need to support tiler-only jobs and h/w with
	 * multiple (2) coherent core groups.
	 */
	affinity = pfdev->features.shader_present;

	job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
	job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
}

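/*
 * Program the slot's _NEXT registers (job chain address, affinity,
 * config, flush ID) and kick the job off with JS_COMMAND_START.
 */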
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
	struct panfrost_device *pfdev = job->pfdev;
	u32 cfg;
	u64 jc_head = job->jc;
	int ret;

	panfrost_devfreq_record_busy(&pfdev->pfdevfreq);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
		return;

	cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);

	job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
	job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);

	panfrost_job_write_affinity(pfdev, job->requirements, js);

	/* start MMU, medium priority, cache clean/flush on end, clean/flush on
	 * start */
	cfg |= JS_CONFIG_THREAD_PRI(8) |
		JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
		JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
		cfg |= JS_CONFIG_START_MMU;

	job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

	/* GO ! */
	dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx\n",
		job, js, jc_head);

	job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
}

static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
					   int bo_count,
					   struct dma_fence **implicit_fences)
{
	int i;

	for (i = 0; i < bo_count; i++)
		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}

static void panfrost_attach_object_fences(struct drm_gem_object **bos,
					  int bo_count,
					  struct dma_fence *fence)
{
	int i;

	for (i = 0; i < bo_count; i++)
		dma_resv_add_excl_fence(bos[i]->resv, fence);
}

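/*
 * Submission path: take the BO reservations, initialize the scheduler
 * job, snapshot the implicit fences, push the job to its entity, then
 * attach the render-done fence to every BO.
 */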
int panfrost_job_push(struct panfrost_job *job)
{
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	mutex_lock(&pfdev->sched_lock);

	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					&acquire_ctx);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		return ret;
	}

	ret = drm_sched_job_init(&job->base, entity, NULL);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		goto unlock;
	}

	job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

	kref_get(&job->refcount); /* put by scheduler job completion */

	panfrost_acquire_object_fences(job->bos, job->bo_count,
				       job->implicit_fences);

	drm_sched_entity_push_job(&job->base, entity);

	mutex_unlock(&pfdev->sched_lock);

	panfrost_attach_object_fences(job->bos, job->bo_count,
				      job->render_done_fence);

unlock:
	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

	return ret;
}

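/*
 * kref release handler: drop every fence, GEM mapping and BO reference
 * the job still holds, then free the job itself.
 */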
static void panfrost_job_cleanup(struct kref *ref)
{
	struct panfrost_job *job = container_of(ref, struct panfrost_job,
						refcount);
	unsigned int i;

	if (job->in_fences) {
		for (i = 0; i < job->in_fence_count; i++)
			dma_fence_put(job->in_fences[i]);
		kvfree(job->in_fences);
	}
	if (job->implicit_fences) {
		for (i = 0; i < job->bo_count; i++)
			dma_fence_put(job->implicit_fences[i]);
		kvfree(job->implicit_fences);
	}
	dma_fence_put(job->done_fence);
	dma_fence_put(job->render_done_fence);

	if (job->mappings) {
		for (i = 0; i < job->bo_count; i++) {
			if (!job->mappings[i])
				break;

			atomic_dec(&job->mappings[i]->obj->gpu_usecount);
			panfrost_gem_mapping_put(job->mappings[i]);
		}
		kvfree(job->mappings);
	}

	if (job->bos) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put(job->bos[i]);

		kvfree(job->bos);
	}

	kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
	kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);

	drm_sched_job_cleanup(sched_job);

	panfrost_job_put(job);
}

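/*
 * Return one unsignaled dependency per call, NULLing it out so it is
 * only handed to the scheduler once: explicit in-fences first, then
 * the per-BO implicit fences.
 */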
static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
						 struct drm_sched_entity *s_entity)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct dma_fence *fence;
	unsigned int i;

	/* Explicit fences */
	for (i = 0; i < job->in_fence_count; i++) {
		if (job->in_fences[i]) {
			fence = job->in_fences[i];
			job->in_fences[i] = NULL;
			return fence;
		}
	}

	/* Implicit fences, max. one per BO */
	for (i = 0; i < job->bo_count; i++) {
		if (job->implicit_fences[i]) {
			fence = job->implicit_fences[i];
			job->implicit_fences[i] = NULL;
			return fence;
		}
	}

	return NULL;
}

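/*
 * run_job callback: create the hardware "done" fence for this run and
 * submit the job to its slot. Errors are propagated to the scheduler
 * through the returned fence pointer.
 */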
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct dma_fence *fence = NULL;

	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	fence = panfrost_fence_create(pfdev, slot);
	if (IS_ERR(fence))
		return fence;

	pfdev->jobs[slot] = job;

	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	panfrost_job_hw_submit(job, slot);

	return fence;
}

void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
	int j;
	u32 irq_mask = 0;

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		irq_mask |= MK_JS_MASK(j);

	job_write(pfdev, JOB_INT_CLEAR, irq_mask);
	job_write(pfdev, JOB_INT_MASK, irq_mask);
}

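/*
 * Stop a queue's scheduler, bumping the karma of @bad if there is one.
 * Returns true if this call did the stopping, false if the queue was
 * already stopped (e.g. by a concurrent timeout or reset).
 */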
static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue,
				    struct drm_sched_job *bad)
{
	enum panfrost_queue_status old_status;
	bool stopped = false;

	mutex_lock(&queue->lock);
	old_status = atomic_xchg(&queue->status,
				 PANFROST_QUEUE_STATUS_STOPPED);
	if (old_status == PANFROST_QUEUE_STATUS_STOPPED)
		goto out;

	WARN_ON(old_status != PANFROST_QUEUE_STATUS_ACTIVE);
	drm_sched_stop(&queue->sched, bad);
	if (bad)
		drm_sched_increase_karma(bad);

	stopped = true;

	/*
	 * Set the timeout to max so the timer doesn't get started
	 * when we return from the timeout handler (restored in
	 * panfrost_scheduler_start()).
	 */
	queue->sched.timeout = MAX_SCHEDULE_TIMEOUT;

out:
	mutex_unlock(&queue->lock);

	return stopped;
}

static void panfrost_scheduler_start(struct panfrost_queue_state *queue)
{
	enum panfrost_queue_status old_status;

	mutex_lock(&queue->lock);
	old_status = atomic_xchg(&queue->status,
				 PANFROST_QUEUE_STATUS_STARTING);
	WARN_ON(old_status != PANFROST_QUEUE_STATUS_STOPPED);

	/* Restore the original timeout before starting the scheduler. */
	queue->sched.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS);
	drm_sched_resubmit_jobs(&queue->sched);
	drm_sched_start(&queue->sched, true);
	old_status = atomic_xchg(&queue->status,
				 PANFROST_QUEUE_STATUS_ACTIVE);
	if (old_status == PANFROST_QUEUE_STATUS_FAULT_PENDING)
		drm_sched_fault(&queue->sched);

	mutex_unlock(&queue->lock);
}

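/*
 * Timeout handler: dump the slot state, stop the queue and, unless a
 * reset is already in flight, schedule one.
 */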
static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int js = panfrost_job_get_slot(job);

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(job->done_fence))
		return;

	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p\n",
		js,
		job_read(pfdev, JS_CONFIG(js)),
		job_read(pfdev, JS_STATUS(js)),
		job_read(pfdev, JS_HEAD_LO(js)),
		job_read(pfdev, JS_TAIL_LO(js)),
		sched_job);

	/* Scheduler is already stopped, nothing to do. */
	if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
		return;

	/* Schedule a reset if there's no reset in progress. */
	if (!atomic_xchg(&pfdev->reset.pending, 1))
		schedule_work(&pfdev->reset.work);
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
	.dependency = panfrost_job_dependency,
	.run_job = panfrost_job_run,
	.timedout_job = panfrost_job_timedout,
	.free_job = panfrost_job_free,
};

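/*
 * A single interrupt line covers all job slots: walk the status word,
 * acknowledging and handling each slot's ERR and DONE bits until it is
 * clear.
 */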
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = job_read(pfdev, JOB_INT_STAT);
	int j;

	dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);

	if (!status)
		return IRQ_NONE;

	pm_runtime_mark_last_busy(pfdev->dev);

	for (j = 0; status; j++) {
		u32 mask = MK_JS_MASK(j);

		if (!(status & mask))
			continue;

		job_write(pfdev, JOB_INT_CLEAR, mask);

		if (status & JOB_INT_MASK_ERR(j)) {
			enum panfrost_queue_status old_status;

			job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);

			dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x\n",
				j,
				panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
				job_read(pfdev, JS_HEAD_LO(j)),
				job_read(pfdev, JS_TAIL_LO(j)));

			/*
			 * When the queue is being restarted we don't report
			 * faults directly to avoid races between the timeout
			 * and reset handlers. panfrost_scheduler_start() will
			 * call drm_sched_fault() after the queue has been
			 * started if status == FAULT_PENDING.
			 */
			old_status = atomic_cmpxchg(&pfdev->js->queue[j].status,
						    PANFROST_QUEUE_STATUS_STARTING,
						    PANFROST_QUEUE_STATUS_FAULT_PENDING);
			if (old_status == PANFROST_QUEUE_STATUS_ACTIVE)
				drm_sched_fault(&pfdev->js->queue[j].sched);
		}

		if (status & JOB_INT_MASK_DONE(j)) {
			struct panfrost_job *job;

			spin_lock(&pfdev->js->job_lock);
			job = pfdev->jobs[j];
			/* Only NULL if job timeout occurred */
			if (job) {
				pfdev->jobs[j] = NULL;

				panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
				panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

				dma_fence_signal_locked(job->done_fence);
				pm_runtime_put_autosuspend(pfdev->dev);
			}
			spin_unlock(&pfdev->js->job_lock);
		}

		status &= ~mask;
	}

	return IRQ_HANDLED;
}

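/*
 * Reset worker: quiesce every queue (making sure pending timeout
 * handlers have finished first), drop the references held by in-flight
 * jobs, reset the GPU and restart the queues.
 */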
static void panfrost_reset(struct work_struct *work)
{
	struct panfrost_device *pfdev = container_of(work,
						     struct panfrost_device,
						     reset.work);
	unsigned long flags;
	unsigned int i;
	bool cookie;

	cookie = dma_fence_begin_signalling();
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/*
		 * We want pending timeouts to be handled before we attempt
		 * to stop the scheduler. If we don't do that and the timeout
		 * handler is in flight, it might have removed the bad job
		 * from the list, and we'll lose this job if the reset handler
		 * enters the critical section in panfrost_scheduler_stop()
		 * before the timeout handler.
		 *
		 * Timeout is set to MAX_SCHEDULE_TIMEOUT - 1 because we need
		 * something big enough to make sure the timer will not expire
		 * before we manage to stop the scheduler, but we can't use
		 * MAX_SCHEDULE_TIMEOUT because drm_sched_get_cleanup_job()
		 * considers that as 'timer is not running' and will dequeue
		 * the job without making sure the timeout handler is not
		 * running.
		 */
		pfdev->js->queue[i].sched.timeout = MAX_SCHEDULE_TIMEOUT - 1;
		cancel_delayed_work_sync(&pfdev->js->queue[i].sched.work_tdr);
		panfrost_scheduler_stop(&pfdev->js->queue[i], NULL);
	}

	/* All timers have been stopped, we can safely reset the pending state. */
	atomic_set(&pfdev->reset.pending, 0);

	spin_lock_irqsave(&pfdev->js->job_lock, flags);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		if (pfdev->jobs[i]) {
			pm_runtime_put_noidle(pfdev->dev);
			panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
			pfdev->jobs[i] = NULL;
		}
	}
	spin_unlock_irqrestore(&pfdev->js->job_lock, flags);

	panfrost_device_reset(pfdev);

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		panfrost_scheduler_start(&pfdev->js->queue[i]);

	dma_fence_end_signalling(cookie);
}

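/*
 * Device init: allocate the job-slot state, hook up the "job"
 * interrupt and create one GPU scheduler per slot.
 */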
int panfrost_job_init(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js;
	int ret, j, irq;

	INIT_WORK(&pfdev->reset.work, panfrost_reset);

	pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
	if (!js)
		return -ENOMEM;

	spin_lock_init(&js->job_lock);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
	if (irq < 0)
		return irq;

	ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME "-job", pfdev);
	if (ret) {
		dev_err(pfdev->dev, "failed to request job irq\n");
		return ret;
	}

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		mutex_init(&js->queue[j].lock);

		js->queue[j].fence_context = dma_fence_context_alloc(1);

		ret = drm_sched_init(&js->queue[j].sched,
				     &panfrost_sched_ops,
				     1, 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
				     "pan_js");
		if (ret) {
			dev_err(pfdev->dev, "Failed to create scheduler: %d.\n", ret);
			goto err_sched;
		}
	}

	panfrost_job_enable_interrupts(pfdev);

	return 0;

err_sched:
	for (j--; j >= 0; j--)
		drm_sched_fini(&js->queue[j].sched);

	return ret;
}

void panfrost_job_fini(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int j;

	job_write(pfdev, JOB_INT_MASK, 0);

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		drm_sched_fini(&js->queue[j].sched);
		mutex_destroy(&js->queue[j].lock);
	}
}

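/*
 * Per-file setup: give each client one scheduler entity per job slot
 * so its jobs on different slots can be scheduled independently.
 */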
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	struct panfrost_job_slot *js = pfdev->js;
	struct drm_gpu_scheduler *sched;
	int ret, i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		sched = &js->queue[i].sched;
		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
					    DRM_SCHED_PRIORITY_NORMAL, &sched,
					    1, NULL);
		if (WARN_ON(ret))
			return ret;
	}
	return 0;
}

void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
}

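/*
 * True when no slot has jobs in its hardware queue; lets the device
 * suspend path bail out while work is still in flight.
 */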
int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* If there are any jobs in the HW queue, we're not idle */
		if (atomic_read(&js->queue[i].sched.hw_rq_count))
			return false;
	}

	return true;
}