// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/uaccess.h>
#include <linux/slab.h>

#define HL_CS_FLAGS_SIG_WAIT	(HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT)

static void job_wq_completion(struct work_struct *work);
static long _hl_cs_wait_ioctl(struct hl_device *hdev,
		struct hl_ctx *ctx, u64 timeout_us, u64 seq);
static void cs_do_release(struct kref *ref);

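/*
 * hl_sob_reset - reset a h/w sync object (SOB) once its kref drops to zero
 *
 * @ref	: kref embedded in the SOB object
 *
 * Called when the last user of the SOB releases it, so the SOB can be reset
 * via the ASIC-specific callback and then reused.
 */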
static void hl_sob_reset(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	hdev->asic_funcs->reset_sob(hdev, hw_sob);
}

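/*
 * hl_sob_reset_error - kref release callback that is never expected to run
 *
 * @ref	: kref embedded in the SOB object
 *
 * Installed where the SOB refcount must not reach zero, so getting here
 * indicates a driver bug and only a critical error is logged.
 */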
void hl_sob_reset_error(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	dev_crit(hdev->dev,
			"SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
			hw_sob->q_idx, hw_sob->sob_id);
}

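/*
 * hl_fence_release - release a CS fence once its refcount drops to zero
 *
 * @kref	: kref embedded in the fence
 *
 * Frees the CS completion object that contains the fence. For a signal/wait
 * CS that was actually submitted, it also drops the reference that the CS
 * took on its h/w SOB.
 */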
static void hl_fence_release(struct kref *kref)
{
	struct hl_fence *fence =
		container_of(kref, struct hl_fence, refcount);
	struct hl_cs_compl *hl_cs_cmpl =
		container_of(fence, struct hl_cs_compl, base_fence);
	struct hl_device *hdev = hl_cs_cmpl->hdev;

	/* EBUSY means the CS was never submitted and hence we don't have
	 * an attached hw_sob object that we should handle here
	 */
	if (fence->error == -EBUSY)
		goto free;

	if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
			(hl_cs_cmpl->type == CS_TYPE_WAIT)) {

		dev_dbg(hdev->dev,
			"CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n",
			hl_cs_cmpl->cs_seq,
			hl_cs_cmpl->type,
			hl_cs_cmpl->hw_sob->sob_id,
			hl_cs_cmpl->sob_val);

		/*
		 * A signal CS can get completion while the corresponding wait
		 * for signal CS is on its way to the PQ. The wait for signal CS
		 * will get stuck if the signal CS incremented the SOB to its
		 * max value and there are no pending (submitted) waits on this
		 * SOB.
		 * We do the following to avoid this situation:
		 * 1. The wait for signal CS must get a ref for the signal CS as
		 *    soon as possible in cs_ioctl_signal_wait() and put it
		 *    before being submitted to the PQ but after it incremented
		 *    the SOB refcnt in init_signal_wait_cs().
		 * 2. Signal/Wait for signal CS will decrement the SOB refcnt
		 *    here.
		 * These two measures guarantee that the wait for signal CS will
		 * reset the SOB upon completion rather than the signal CS and
		 * hence the above scenario is avoided.
		 */
		kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
	}

free:
	kfree(hl_cs_cmpl);
}

void hl_fence_put(struct hl_fence *fence)
{
	if (fence)
		kref_put(&fence->refcount, hl_fence_release);
}

void hl_fence_get(struct hl_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
}

static void hl_fence_init(struct hl_fence *fence)
{
	kref_init(&fence->refcount);
	fence->error = 0;
	init_completion(&fence->completion);
}

static void cs_get(struct hl_cs *cs)
{
	kref_get(&cs->refcount);
}

static int cs_get_unless_zero(struct hl_cs *cs)
{
	return kref_get_unless_zero(&cs->refcount);
}

static void cs_put(struct hl_cs *cs)
{
	kref_put(&cs->refcount, cs_do_release);
}

static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
{
	/*
	 * Patched CB is created for external queues jobs, and for H/W queues
	 * jobs if the user CB was allocated by driver and MMU is disabled.
	 */
	return (job->queue_type == QUEUE_TYPE_EXT ||
			(job->queue_type == QUEUE_TYPE_HW &&
					job->is_kernel_allocated_cb &&
					!hdev->mmu_enable));
}

/*
 * cs_parser - parse the user command submission
 *
 * @hpriv	: pointer to the private data of the fd
 * @job	: pointer to the job that holds the command submission info
 *
 * The function parses the command submission of the user. It calls the
 * ASIC specific parser, which returns a list of memory blocks to send
 * to the device as different command buffers
 *
 */
static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_parser parser;
	int rc;

	parser.ctx_id = job->cs->ctx->asid;
	parser.cs_sequence = job->cs->sequence;
	parser.job_id = job->id;

	parser.hw_queue_id = job->hw_queue_id;
	parser.job_userptr_list = &job->userptr_list;
	parser.patched_cb = NULL;
	parser.user_cb = job->user_cb;
	parser.user_cb_size = job->user_cb_size;
	parser.queue_type = job->queue_type;
	parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
	job->patched_cb = NULL;

	rc = hdev->asic_funcs->cs_parser(hdev, &parser);

	if (is_cb_patched(hdev, job)) {
		if (!rc) {
			job->patched_cb = parser.patched_cb;
			job->job_cb_size = parser.patched_cb_size;
			job->contains_dma_pkt = parser.contains_dma_pkt;

			spin_lock(&job->patched_cb->lock);
			job->patched_cb->cs_cnt++;
			spin_unlock(&job->patched_cb->lock);
		}

		/*
		 * Whether the parsing worked or not, we don't need the
		 * original CB anymore because it was already parsed and
		 * won't be accessed again for this CS
		 */
		spin_lock(&job->user_cb->lock);
		job->user_cb->cs_cnt--;
		spin_unlock(&job->user_cb->lock);
		hl_cb_put(job->user_cb);
		job->user_cb = NULL;
	} else if (!rc) {
		job->job_cb_size = job->user_cb_size;
	}

	return rc;
}

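/*
 * free_job - release all resources that are attached to a finished job
 *
 * @hdev	: pointer to the habanalabs device structure
 * @job	: pointer to the job to release
 *
 * Releases the patched/user CBs, removes the job from the CS job list and
 * from debugfs, and drops the CS reference that was taken for jobs on
 * external or H/W queues.
 */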
static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_cs *cs = job->cs;

	if (is_cb_patched(hdev, job)) {
		hl_userptr_delete_list(hdev, &job->userptr_list);

		/*
		 * We might arrive here from rollback and patched CB wasn't
		 * created, so we need to check it's not NULL
		 */
		if (job->patched_cb) {
			spin_lock(&job->patched_cb->lock);
			job->patched_cb->cs_cnt--;
			spin_unlock(&job->patched_cb->lock);

			hl_cb_put(job->patched_cb);
		}
	}

	/* For H/W queue jobs, if a user CB was allocated by driver and MMU is
	 * enabled, the user CB isn't released in cs_parser() and thus should be
	 * released here.
	 */
	if (job->queue_type == QUEUE_TYPE_HW &&
			job->is_kernel_allocated_cb && hdev->mmu_enable) {
		spin_lock(&job->user_cb->lock);
		job->user_cb->cs_cnt--;
		spin_unlock(&job->user_cb->lock);

		hl_cb_put(job->user_cb);
	}

	/*
	 * This is the only place where there can be multiple threads
	 * modifying the list at the same time
	 */
	spin_lock(&cs->job_lock);
	list_del(&job->cs_node);
	spin_unlock(&cs->job_lock);

	hl_debugfs_remove_job(hdev, job);

	if (job->queue_type == QUEUE_TYPE_EXT ||
			job->queue_type == QUEUE_TYPE_HW)
		cs_put(cs);

	kfree(job);
}

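/*
 * cs_counters_aggregate - add the context CS counters to the device totals
 *
 * @hdev	: pointer to the habanalabs device structure
 * @ctx	: pointer to the context whose counters are aggregated
 */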
static void cs_counters_aggregate(struct hl_device *hdev, struct hl_ctx *ctx)
{
	hdev->aggregated_cs_counters.device_in_reset_drop_cnt +=
			ctx->cs_counters.device_in_reset_drop_cnt;
	hdev->aggregated_cs_counters.out_of_mem_drop_cnt +=
			ctx->cs_counters.out_of_mem_drop_cnt;
	hdev->aggregated_cs_counters.parsing_drop_cnt +=
			ctx->cs_counters.parsing_drop_cnt;
	hdev->aggregated_cs_counters.queue_full_drop_cnt +=
			ctx->cs_counters.queue_full_drop_cnt;
	hdev->aggregated_cs_counters.max_cs_in_flight_drop_cnt +=
			ctx->cs_counters.max_cs_in_flight_drop_cnt;
}

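/*
 * cs_do_release - final release of a command submission
 *
 * @ref	: kref embedded in the CS object
 *
 * Frees the remaining (internal) jobs, updates CI for internal queues,
 * re-arms the TDR for the next CS on the mirror list, marks an error on the
 * fence if needed, completes the fence and frees the CS object itself.
 */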
static void cs_do_release(struct kref *ref)
{
	struct hl_cs *cs = container_of(ref, struct hl_cs,
						refcount);
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_cs_job *job, *tmp;

	cs->completed = true;

	/*
	 * Although reaching here means that all external jobs have finished
	 * (because each of them holds a refcount on the CS), we still need
	 * to go over the internal jobs and free them. Otherwise, we will
	 * leak memory and, what's worse, the CS object (and potentially the
	 * CTX object) could be released while a JOB still holds a pointer
	 * to them (but no reference).
	 */
	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		free_job(hdev, job);

	/* We also need to update CI for internal queues */
	if (cs->submitted) {
		hdev->asic_funcs->hw_queues_lock(hdev);

		hdev->cs_active_cnt--;
		if (!hdev->cs_active_cnt) {
			struct hl_device_idle_busy_ts *ts;

			ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
			ts->busy_to_idle_ts = ktime_get();

			if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
				hdev->idle_busy_ts_idx = 0;
		} else if (hdev->cs_active_cnt < 0) {
			dev_crit(hdev->dev, "CS active cnt %d is negative\n",
				hdev->cs_active_cnt);
		}

		hdev->asic_funcs->hw_queues_unlock(hdev);

		hl_int_hw_queue_update_ci(cs);

		spin_lock(&hdev->hw_queues_mirror_lock);
		/* remove CS from hw_queues mirror list */
		list_del_init(&cs->mirror_node);
		spin_unlock(&hdev->hw_queues_mirror_lock);

		/*
		 * Don't cancel TDR in case this CS was timed out because we
		 * might be running from the TDR context
		 */
		if ((!cs->timedout) &&
			(hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) {
			struct hl_cs *next;

			if (cs->tdr_active)
				cancel_delayed_work_sync(&cs->work_tdr);

			spin_lock(&hdev->hw_queues_mirror_lock);

			/* queue TDR for next CS */
			next = list_first_entry_or_null(
					&hdev->hw_queues_mirror_list,
					struct hl_cs, mirror_node);

			if ((next) && (!next->tdr_active)) {
				next->tdr_active = true;
				schedule_delayed_work(&next->work_tdr,
							hdev->timeout_jiffies);
			}

			spin_unlock(&hdev->hw_queues_mirror_lock);
		}
	} else if (cs->type == CS_TYPE_WAIT) {
		/*
		 * In case the wait for signal CS was submitted, the put occurs
		 * in init_signal_wait_cs() right before hanging on the PQ.
		 */
		hl_fence_put(cs->signal_fence);
	}

	/*
	 * Must be called before hl_ctx_put because inside we use ctx to get
	 * the device
	 */
	hl_debugfs_remove_cs(cs);

	hl_ctx_put(cs->ctx);

	/* We need to mark an error for not submitted because in that case
	 * the hl fence release flow is different. Mainly, we don't need
	 * to handle hw_sob for signal/wait
	 */
	if (cs->timedout)
		cs->fence->error = -ETIMEDOUT;
	else if (cs->aborted)
		cs->fence->error = -EIO;
	else if (!cs->submitted)
		cs->fence->error = -EBUSY;

	complete_all(&cs->fence->completion);
	hl_fence_put(cs->fence);
	cs_counters_aggregate(hdev, cs->ctx);

	kfree(cs->jobs_in_queue_cnt);
	kfree(cs);
}

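/*
 * cs_timedout - TDR work function, called when a CS exceeds its timeout
 *
 * @work	: the delayed work that fired
 *
 * Marks the CS as timed out so its TDR won't be cancelled from within this
 * context, and resets the device if reset_on_lockup is set.
 */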
static void cs_timedout(struct work_struct *work)
{
	struct hl_device *hdev;
	int rc;
	struct hl_cs *cs = container_of(work, struct hl_cs,
						 work_tdr.work);

	rc = cs_get_unless_zero(cs);
	if (!rc)
		return;

	if ((!cs->submitted) || (cs->completed)) {
		cs_put(cs);
		return;
	}

	/* Mark the CS as timed out so we won't try to cancel its TDR */
	cs->timedout = true;

	hdev = cs->ctx->hdev;

	dev_err(hdev->dev,
		"Command submission %llu has not finished in time!\n",
		cs->sequence);

	cs_put(cs);

	if (hdev->reset_on_lockup)
		hl_device_reset(hdev, false, false);
}

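/*
 * allocate_cs - allocate and initialize a CS object together with its fence
 *
 * @hdev	: pointer to the habanalabs device structure
 * @ctx	: pointer to the submitting context
 * @cs_type	: default, signal or wait CS
 * @cs_new	: [out] the newly allocated CS object
 *
 * Rejects the submission with -EAGAIN if the context already has the maximum
 * number of in-flight CS.
 */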
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
			enum hl_cs_type cs_type, struct hl_cs **cs_new)
{
	struct hl_cs_compl *cs_cmpl;
	struct hl_fence *other = NULL;
	struct hl_cs *cs;
	int rc;

	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
	if (!cs)
		return -ENOMEM;

	cs->ctx = ctx;
	cs->submitted = false;
	cs->completed = false;
	cs->type = cs_type;
	INIT_LIST_HEAD(&cs->job_list);
	INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
	kref_init(&cs->refcount);
	spin_lock_init(&cs->job_lock);

	cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
	if (!cs_cmpl) {
		rc = -ENOMEM;
		goto free_cs;
	}

	cs_cmpl->hdev = hdev;
	cs_cmpl->type = cs->type;
	spin_lock_init(&cs_cmpl->lock);
	cs->fence = &cs_cmpl->base_fence;

	spin_lock(&ctx->cs_lock);

	cs_cmpl->cs_seq = ctx->cs_sequence;
	other = ctx->cs_pending[cs_cmpl->cs_seq &
				(hdev->asic_prop.max_pending_cs - 1)];

	if (other && !completion_done(&other->completion)) {
		dev_dbg_ratelimited(hdev->dev,
			"Rejecting CS because of too many in-flight CS\n");
		ctx->cs_counters.max_cs_in_flight_drop_cnt++;
		rc = -EAGAIN;
		goto free_fence;
	}

	cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
			sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
	if (!cs->jobs_in_queue_cnt) {
		rc = -ENOMEM;
		goto free_fence;
	}

	/* init hl_fence */
	hl_fence_init(&cs_cmpl->base_fence);

	cs->sequence = cs_cmpl->cs_seq;

	ctx->cs_pending[cs_cmpl->cs_seq &
			(hdev->asic_prop.max_pending_cs - 1)] =
							&cs_cmpl->base_fence;
	ctx->cs_sequence++;

	hl_fence_get(&cs_cmpl->base_fence);

	hl_fence_put(other);

	spin_unlock(&ctx->cs_lock);

	*cs_new = cs;

	return 0;

free_fence:
	spin_unlock(&ctx->cs_lock);
	kfree(cs_cmpl);
free_cs:
	kfree(cs);
	return rc;
}

static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs_job *job, *tmp;

	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		free_job(hdev, job);
}

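/*
 * hl_cs_rollback_all - abort and roll back all CS that are still in-flight
 *
 * @hdev	: pointer to the habanalabs device structure
 *
 * Called during device reset/teardown, after the completion workqueues were
 * flushed, to make sure no CS is left on the H/W queues mirror list.
 */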
void hl_cs_rollback_all(struct hl_device *hdev)
{
	int i;
	struct hl_cs *cs, *tmp;

	/* flush all completions */
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		flush_workqueue(hdev->cq_wq[i]);

	/* Make sure we don't have leftovers in the H/W queues mirror list */
	list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
				mirror_node) {
		cs_get(cs);
		cs->aborted = true;
		dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
					cs->ctx->asid, cs->sequence);
		cs_rollback(hdev, cs);
		cs_put(cs);
	}
}

static void job_wq_completion(struct work_struct *work)
{
	struct hl_cs_job *job = container_of(work, struct hl_cs_job,
						finish_work);
	struct hl_cs *cs = job->cs;
	struct hl_device *hdev = cs->ctx->hdev;

	/* job is no longer needed */
	free_job(hdev, job);
}

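/*
 * validate_queue_index - sanity-check the queue index of a CS chunk
 *
 * @hdev	: pointer to the habanalabs device structure
 * @chunk	: the user CS chunk to validate
 * @queue_type	: [out] type of the target queue
 * @is_kernel_allocated_cb	: [out] whether the queue requires a kernel CB
 */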
static int validate_queue_index(struct hl_device *hdev,
				struct hl_cs_chunk *chunk,
				enum hl_queue_type *queue_type,
				bool *is_kernel_allocated_cb)
{
	struct asic_fixed_properties *asic = &hdev->asic_prop;
	struct hw_queue_properties *hw_queue_prop;

	/* This must be checked here to prevent out-of-bounds access to
	 * hw_queues_props array
	 */
	if (chunk->queue_index >= asic->max_queues) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		return -EINVAL;
	}

	hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];

	if (hw_queue_prop->type == QUEUE_TYPE_NA) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		return -EINVAL;
	}

	if (hw_queue_prop->driver_only) {
		dev_err(hdev->dev,
			"Queue index %d is restricted for the kernel driver\n",
			chunk->queue_index);
		return -EINVAL;
	}

	*queue_type = hw_queue_prop->type;
	*is_kernel_allocated_cb = !!hw_queue_prop->requires_kernel_cb;

	return 0;
}

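/*
 * get_cb_from_cs_chunk - fetch and validate the CB referenced by a CS chunk
 *
 * @hdev	: pointer to the habanalabs device structure
 * @cb_mgr	: CB manager of the submitting process
 * @chunk	: the user CS chunk that holds the CB handle and size
 *
 * On success, returns the CB with its CS usage counter incremented.
 * Returns NULL on any failure.
 */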
static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
					struct hl_cb_mgr *cb_mgr,
					struct hl_cs_chunk *chunk)
{
	struct hl_cb *cb;
	u32 cb_handle;

	cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);

	cb = hl_cb_get(hdev, cb_mgr, cb_handle);
	if (!cb) {
		dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
		return NULL;
	}

	if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
		dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
		goto release_cb;
	}

	spin_lock(&cb->lock);
	cb->cs_cnt++;
	spin_unlock(&cb->lock);

	return cb;

release_cb:
	hl_cb_put(cb);
	return NULL;
}

struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
		enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
{
	struct hl_cs_job *job;

	job = kzalloc(sizeof(*job), GFP_ATOMIC);
	if (!job)
		return NULL;

	job->queue_type = queue_type;
	job->is_kernel_allocated_cb = is_kernel_allocated_cb;

	if (is_cb_patched(hdev, job))
		INIT_LIST_HEAD(&job->userptr_list);

	if (job->queue_type == QUEUE_TYPE_EXT)
		INIT_WORK(&job->finish_work, job_wq_completion);

	return job;
}

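/*
 * cs_ioctl_default - submit a default (execution) command submission
 *
 * @hpriv	: pointer to the private data of the fd
 * @chunks	: user array of CS chunks, one per job
 * @num_chunks	: number of chunks in the array
 * @cs_seq	: [out] sequence number of the new CS
 *
 * Validates all the chunks, builds a job per chunk, parses the jobs and
 * schedules the CS on the H/W queues.
 */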
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
				u32 num_chunks, u64 *cs_seq)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_chunk *cs_chunk_array;
	struct hl_cs_job *job;
	struct hl_cs *cs;
	struct hl_cb *cb;
	bool int_queues_only = true;
	u32 size_to_copy;
	int rc, i;

	*cs_seq = ULLONG_MAX;

	if (num_chunks > HL_MAX_JOBS_PER_CS) {
		dev_err(hdev->dev,
			"Number of chunks can NOT be larger than %d\n",
			HL_MAX_JOBS_PER_CS);
		rc = -EINVAL;
		goto out;
	}

	cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
					GFP_ATOMIC);
	if (!cs_chunk_array) {
		rc = -ENOMEM;
		goto out;
	}

	size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
	if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
		dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
		rc = -EFAULT;
		goto free_cs_chunk_array;
	}

	/* increment refcnt for context */
	hl_ctx_get(hdev, hpriv->ctx);

	rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, &cs);
	if (rc) {
		hl_ctx_put(hpriv->ctx);
		goto free_cs_chunk_array;
	}

	*cs_seq = cs->sequence;

	hl_debugfs_add_cs(cs);

	/* Validate ALL the CS chunks before submitting the CS */
	for (i = 0 ; i < num_chunks ; i++) {
		struct hl_cs_chunk *chunk = &cs_chunk_array[i];
		enum hl_queue_type queue_type;
		bool is_kernel_allocated_cb;

		rc = validate_queue_index(hdev, chunk, &queue_type,
						&is_kernel_allocated_cb);
		if (rc) {
			hpriv->ctx->cs_counters.parsing_drop_cnt++;
			goto free_cs_object;
		}

		if (is_kernel_allocated_cb) {
			cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
			if (!cb) {
				hpriv->ctx->cs_counters.parsing_drop_cnt++;
				rc = -EINVAL;
				goto free_cs_object;
			}
		} else {
			cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
		}

		if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW)
			int_queues_only = false;

		job = hl_cs_allocate_job(hdev, queue_type,
						is_kernel_allocated_cb);
		if (!job) {
			hpriv->ctx->cs_counters.out_of_mem_drop_cnt++;
			dev_err(hdev->dev, "Failed to allocate a new job\n");
			rc = -ENOMEM;
			if (is_kernel_allocated_cb)
				goto release_cb;

			goto free_cs_object;
		}

		job->id = i + 1;
		job->cs = cs;
		job->user_cb = cb;
		job->user_cb_size = chunk->cb_size;
		job->hw_queue_id = chunk->queue_index;

		cs->jobs_in_queue_cnt[job->hw_queue_id]++;

		list_add_tail(&job->cs_node, &cs->job_list);

		/*
		 * Increment CS reference. When CS reference is 0, CS is
		 * done and can be signaled to the user and all its resources
		 * can be freed. Only increment for JOBs on external or H/W
		 * queues, because only for those JOBs we get completion.
		 */
		if (job->queue_type == QUEUE_TYPE_EXT ||
				job->queue_type == QUEUE_TYPE_HW)
			cs_get(cs);

		hl_debugfs_add_job(hdev, job);

		rc = cs_parser(hpriv, job);
		if (rc) {
			hpriv->ctx->cs_counters.parsing_drop_cnt++;
			dev_err(hdev->dev,
				"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
				cs->ctx->asid, cs->sequence, job->id, rc);
			goto free_cs_object;
		}
	}

	if (int_queues_only) {
		hpriv->ctx->cs_counters.parsing_drop_cnt++;
		dev_err(hdev->dev,
			"Reject CS %d.%llu because only internal queues jobs are present\n",
			cs->ctx->asid, cs->sequence);
		rc = -EINVAL;
		goto free_cs_object;
	}

	rc = hl_hw_queue_schedule_cs(cs);
	if (rc) {
		if (rc != -EAGAIN)
			dev_err(hdev->dev,
				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
				cs->ctx->asid, cs->sequence, rc);
		goto free_cs_object;
	}

	rc = HL_CS_STATUS_SUCCESS;
	goto put_cs;

release_cb:
	spin_lock(&cb->lock);
	cb->cs_cnt--;
	spin_unlock(&cb->lock);
	hl_cb_put(cb);
free_cs_object:
	cs_rollback(hdev, cs);
	*cs_seq = ULLONG_MAX;
	/* The path below is both for good and erroneous exits */
put_cs:
	/* We finished with the CS in this function, so put the ref */
	cs_put(cs);
free_cs_chunk_array:
	kfree(cs_chunk_array);
out:
	return rc;
}

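/*
 * cs_ioctl_signal_wait - submit a sync-stream signal or wait CS
 *
 * @hpriv	: pointer to the private data of the fd
 * @cs_type	: CS_TYPE_SIGNAL or CS_TYPE_WAIT
 * @chunks	: user array that holds a single CS chunk
 * @num_chunks	: number of chunks (must be 1)
 * @cs_seq	: [out] sequence number of the new CS
 *
 * For a wait CS, the function first looks up the fence of the signal CS it
 * depends on, and returns early if that CS has already completed.
 */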
static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
				void __user *chunks, u32 num_chunks,
				u64 *cs_seq)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	struct hl_cs_chunk *cs_chunk_array, *chunk;
	struct hw_queue_properties *hw_queue_prop;
	struct hl_fence *sig_fence = NULL;
	struct hl_cs_job *job;
	struct hl_cs *cs;
	struct hl_cb *cb;
	enum hl_queue_type q_type;
	u64 *signal_seq_arr = NULL, signal_seq;
	u32 size_to_copy, q_idx, signal_seq_arr_len, cb_size;
	int rc;

	*cs_seq = ULLONG_MAX;

	if (num_chunks > HL_MAX_JOBS_PER_CS) {
		dev_err(hdev->dev,
			"Number of chunks can NOT be larger than %d\n",
			HL_MAX_JOBS_PER_CS);
		rc = -EINVAL;
		goto out;
	}

	cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
					GFP_ATOMIC);
	if (!cs_chunk_array) {
		rc = -ENOMEM;
		goto out;
	}

	size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
	if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
		dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
		rc = -EFAULT;
		goto free_cs_chunk_array;
	}

	/* currently it is guaranteed to have only one chunk */
	chunk = &cs_chunk_array[0];

	if (chunk->queue_index >= hdev->asic_prop.max_queues) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		rc = -EINVAL;
		goto free_cs_chunk_array;
	}

	q_idx = chunk->queue_index;
	hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
	q_type = hw_queue_prop->type;

	/* q_idx was already validated against max_queues above */
	if (!hw_queue_prop->supports_sync_stream) {
		dev_err(hdev->dev,
			"Queue index %d does not support sync stream operations\n",
			q_idx);
		rc = -EINVAL;
		goto free_cs_chunk_array;
	}

	if (cs_type == CS_TYPE_WAIT) {
		struct hl_cs_compl *sig_waitcs_cmpl;

		signal_seq_arr_len = chunk->num_signal_seq_arr;

		/* currently only one signal seq is supported */
		if (signal_seq_arr_len != 1) {
			dev_err(hdev->dev,
				"Wait for signal CS supports only one signal CS seq\n");
			rc = -EINVAL;
			goto free_cs_chunk_array;
		}

		signal_seq_arr = kmalloc_array(signal_seq_arr_len,
						sizeof(*signal_seq_arr),
						GFP_ATOMIC);
		if (!signal_seq_arr) {
			rc = -ENOMEM;
			goto free_cs_chunk_array;
		}

		size_to_copy = chunk->num_signal_seq_arr *
				sizeof(*signal_seq_arr);
		if (copy_from_user(signal_seq_arr,
					u64_to_user_ptr(chunk->signal_seq_arr),
					size_to_copy)) {
			dev_err(hdev->dev,
				"Failed to copy signal seq array from user\n");
			rc = -EFAULT;
			goto free_signal_seq_array;
		}

		/* currently it is guaranteed to have only one signal seq */
		signal_seq = signal_seq_arr[0];
		sig_fence = hl_ctx_get_fence(ctx, signal_seq);
		if (IS_ERR(sig_fence)) {
			dev_err(hdev->dev,
				"Failed to get signal CS with seq 0x%llx\n",
				signal_seq);
			rc = PTR_ERR(sig_fence);
			goto free_signal_seq_array;
		}

		if (!sig_fence) {
			/* signal CS already finished */
			rc = 0;
			goto free_signal_seq_array;
		}

		sig_waitcs_cmpl =
			container_of(sig_fence, struct hl_cs_compl, base_fence);

		if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
			dev_err(hdev->dev,
				"CS seq 0x%llx is not of a signal CS\n",
				signal_seq);
			hl_fence_put(sig_fence);
			rc = -EINVAL;
			goto free_signal_seq_array;
		}

		if (completion_done(&sig_fence->completion)) {
			/* signal CS already finished */
			hl_fence_put(sig_fence);
			rc = 0;
			goto free_signal_seq_array;
		}
	}

	/* increment refcnt for context */
	hl_ctx_get(hdev, ctx);

	rc = allocate_cs(hdev, ctx, cs_type, &cs);
	if (rc) {
		if (cs_type == CS_TYPE_WAIT)
			hl_fence_put(sig_fence);
		hl_ctx_put(ctx);
		goto free_signal_seq_array;
	}

	/*
	 * Save the signal CS fence for later initialization right before
	 * hanging the wait CS on the queue.
	 */
	if (cs->type == CS_TYPE_WAIT)
		cs->signal_fence = sig_fence;

	hl_debugfs_add_cs(cs);

	*cs_seq = cs->sequence;

	job = hl_cs_allocate_job(hdev, q_type, true);
	if (!job) {
		ctx->cs_counters.out_of_mem_drop_cnt++;
		dev_err(hdev->dev, "Failed to allocate a new job\n");
		rc = -ENOMEM;
		goto put_cs;
	}

	if (cs->type == CS_TYPE_WAIT)
		cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
	else
		cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);

	cb = hl_cb_kernel_create(hdev, cb_size,
				q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
	if (!cb) {
		ctx->cs_counters.out_of_mem_drop_cnt++;
		kfree(job);
		rc = -EFAULT;
		goto put_cs;
	}

	job->id = 0;
	job->cs = cs;
	job->user_cb = cb;
	job->user_cb->cs_cnt++;
	job->user_cb_size = cb_size;
	job->hw_queue_id = q_idx;

	/*
	 * No need for parsing as the user CB is already the patched CB.
	 * We call hl_cb_destroy() for two reasons - we don't need the CB in
	 * the CB idr anymore, and we need to decrement its refcount as it
	 * was incremented inside hl_cb_kernel_create().
	 */
	job->patched_cb = job->user_cb;
	job->job_cb_size = job->user_cb_size;
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);

	cs->jobs_in_queue_cnt[job->hw_queue_id]++;

	list_add_tail(&job->cs_node, &cs->job_list);

	/* increment refcount as for external queues we get completion */
	cs_get(cs);

	hl_debugfs_add_job(hdev, job);

	rc = hl_hw_queue_schedule_cs(cs);
	if (rc) {
		if (rc != -EAGAIN)
			dev_err(hdev->dev,
				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
				ctx->asid, cs->sequence, rc);
		goto free_cs_object;
	}

	rc = HL_CS_STATUS_SUCCESS;
	goto put_cs;

free_cs_object:
	cs_rollback(hdev, cs);
	*cs_seq = ULLONG_MAX;
	/* The path below is both for good and erroneous exits */
put_cs:
	/* We finished with the CS in this function, so put the ref */
	cs_put(cs);
free_signal_seq_array:
	if (cs_type == CS_TYPE_WAIT)
		kfree(signal_seq_arr);
free_cs_chunk_array:
	kfree(cs_chunk_array);
out:
	return rc;
}

int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_device *hdev = hpriv->hdev;
	union hl_cs_args *args = data;
	struct hl_ctx *ctx = hpriv->ctx;
	void __user *chunks_execute, *chunks_restore;
	enum hl_cs_type cs_type;
	u32 num_chunks_execute, num_chunks_restore, sig_wait_flags;
	u64 cs_seq = ULLONG_MAX;
	int rc, do_ctx_switch;
	bool need_soft_reset = false;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't submit new CS\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		rc = -EBUSY;
		goto out;
	}

	sig_wait_flags = args->in.cs_flags & HL_CS_FLAGS_SIG_WAIT;

	if (unlikely(sig_wait_flags == HL_CS_FLAGS_SIG_WAIT)) {
		dev_err(hdev->dev,
			"Signal and wait CS flags are mutually exclusive, context %d\n",
			ctx->asid);
		rc = -EINVAL;
		goto out;
	}

	if (unlikely((sig_wait_flags & HL_CS_FLAGS_SIG_WAIT) &&
			(!hdev->supports_sync_stream))) {
		dev_err(hdev->dev, "Sync stream CS is not supported\n");
		rc = -EINVAL;
		goto out;
	}

	if (args->in.cs_flags & HL_CS_FLAGS_SIGNAL)
		cs_type = CS_TYPE_SIGNAL;
	else if (args->in.cs_flags & HL_CS_FLAGS_WAIT)
		cs_type = CS_TYPE_WAIT;
	else
		cs_type = CS_TYPE_DEFAULT;

	chunks_execute = (void __user *) (uintptr_t) args->in.chunks_execute;
	num_chunks_execute = args->in.num_chunks_execute;

	if (cs_type == CS_TYPE_DEFAULT) {
		if (!num_chunks_execute) {
			dev_err(hdev->dev,
				"Got execute CS with 0 chunks, context %d\n",
				ctx->asid);
			rc = -EINVAL;
			goto out;
		}
	} else if (num_chunks_execute != 1) {
		dev_err(hdev->dev,
			"Sync stream CS mandates one chunk only, context %d\n",
			ctx->asid);
		rc = -EINVAL;
		goto out;
	}

	do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);

	if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
		long ret;

		chunks_restore =
			(void __user *) (uintptr_t) args->in.chunks_restore;
		num_chunks_restore = args->in.num_chunks_restore;

		mutex_lock(&hpriv->restore_phase_mutex);

		if (do_ctx_switch) {
			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
			if (rc) {
				dev_err_ratelimited(hdev->dev,
					"Failed to switch to context %d, rejecting CS! %d\n",
					ctx->asid, rc);
				/*
				 * If we timed out, or if the device is not
				 * IDLE while we want to do context-switch
				 * (-EBUSY), we need to soft-reset because
				 * QMAN is probably stuck. However, we can't
				 * call the reset function here directly
				 * because of a potential deadlock, so we need
				 * to do it at the very end of this function
				 */
				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
					need_soft_reset = true;
				mutex_unlock(&hpriv->restore_phase_mutex);
				goto out;
			}
		}

		hdev->asic_funcs->restore_phase_topology(hdev);

		if (!num_chunks_restore) {
			dev_dbg(hdev->dev,
			"Need to run restore phase but restore CS is empty\n");
			rc = 0;
		} else {
			rc = cs_ioctl_default(hpriv, chunks_restore,
						num_chunks_restore, &cs_seq);
		}

		mutex_unlock(&hpriv->restore_phase_mutex);

		if (rc) {
			dev_err(hdev->dev,
				"Failed to submit restore CS for context %d (%d)\n",
				ctx->asid, rc);
			goto out;
		}

		/* Need to wait for restore completion before execution phase */
		if (num_chunks_restore) {
			ret = _hl_cs_wait_ioctl(hdev, ctx,
					jiffies_to_usecs(hdev->timeout_jiffies),
					cs_seq);
			if (ret <= 0) {
				dev_err(hdev->dev,
					"Restore CS for context %d failed to complete %ld\n",
					ctx->asid, ret);
				rc = -ENOEXEC;
				goto out;
			}
		}

		ctx->thread_ctx_switch_wait_token = 1;
	} else if (!ctx->thread_ctx_switch_wait_token) {
		u32 tmp;

		rc = hl_poll_timeout_memory(hdev,
			&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
			100, jiffies_to_usecs(hdev->timeout_jiffies), false);

		if (rc == -ETIMEDOUT) {
			dev_err(hdev->dev,
				"context switch phase timeout (%d)\n", tmp);
			goto out;
		}
	}

	if (cs_type == CS_TYPE_DEFAULT)
		rc = cs_ioctl_default(hpriv, chunks_execute, num_chunks_execute,
					&cs_seq);
	else
		rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks_execute,
						num_chunks_execute, &cs_seq);

out:
	if (rc != -EAGAIN) {
		memset(args, 0, sizeof(*args));
		args->out.status = rc;
		args->out.seq = cs_seq;
	}

	if (((rc == -ETIMEDOUT) || (rc == -EBUSY)) && (need_soft_reset))
		hl_device_reset(hdev, false, false);

	return rc;
}

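/*
 * _hl_cs_wait_ioctl - wait for a CS to complete, identified by its sequence
 *
 * @hdev	: pointer to the habanalabs device structure
 * @ctx	: pointer to the context that submitted the CS
 * @timeout_us	: timeout in microseconds (0 means just poll once)
 * @seq	: sequence number of the CS to wait on
 *
 * Returns a positive value if the CS has completed, 0 if it timed out or is
 * still busy, or a negative error code.
 */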
static long _hl_cs_wait_ioctl(struct hl_device *hdev,
		struct hl_ctx *ctx, u64 timeout_us, u64 seq)
{
	struct hl_fence *fence;
	unsigned long timeout;
	long rc;

	if (timeout_us == MAX_SCHEDULE_TIMEOUT)
		timeout = timeout_us;
	else
		timeout = usecs_to_jiffies(timeout_us);

	hl_ctx_get(hdev, ctx);

	fence = hl_ctx_get_fence(ctx, seq);
	if (IS_ERR(fence)) {
		rc = PTR_ERR(fence);
		if (rc == -EINVAL)
			dev_notice_ratelimited(hdev->dev,
				"Can't wait on CS %llu because current CS is at seq %llu\n",
				seq, ctx->cs_sequence);
	} else if (fence) {
		if (!timeout_us)
			rc = completion_done(&fence->completion);
		else
			rc = wait_for_completion_interruptible_timeout(
					&fence->completion, timeout);

		if (fence->error == -ETIMEDOUT)
			rc = -ETIMEDOUT;
		else if (fence->error == -EIO)
			rc = -EIO;

		hl_fence_put(fence);
	} else {
		dev_dbg(hdev->dev,
			"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
			seq, ctx->cs_sequence);
		rc = 1;
	}

	hl_ctx_put(ctx);

	return rc;
}

int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_device *hdev = hpriv->hdev;
	union hl_wait_cs_args *args = data;
	u64 seq = args->in.seq;
	long rc;

	rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq);

	memset(args, 0, sizeof(*args));

	if (rc < 0) {
		if (rc == -ERESTARTSYS) {
			dev_err_ratelimited(hdev->dev,
				"user process got signal while waiting for CS handle %llu\n",
				seq);
			args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
			rc = -EINTR;
		} else if (rc == -ETIMEDOUT) {
			dev_err_ratelimited(hdev->dev,
				"CS %llu has timed-out while user process is waiting for it\n",
				seq);
			args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
		} else if (rc == -EIO) {
			dev_err_ratelimited(hdev->dev,
				"CS %llu has been aborted while user process is waiting for it\n",
				seq);
			args->out.status = HL_WAIT_CS_STATUS_ABORTED;
		}
		return rc;
	}

	if (rc == 0)
		args->out.status = HL_WAIT_CS_STATUS_BUSY;
	else
		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;

	return 0;
}