/*
 *
 * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */



/**
 * @file mali_kbase_replay.c
 * Replay soft job handlers
 */

#include <linux/dma-mapping.h>
#include <mali_kbase_config.h>
#include <mali_kbase.h>
#include <mali_kbase_mem.h>
#include <mali_kbase_mem_linux.h>

#define JOB_NOT_STARTED 0
#define JOB_TYPE_NULL      (1)
#define JOB_TYPE_VERTEX    (5)
#define JOB_TYPE_TILER     (7)
#define JOB_TYPE_FUSED     (8)
#define JOB_TYPE_FRAGMENT  (9)

#define JOB_HEADER_32_FBD_OFFSET (31*4)
#define JOB_HEADER_64_FBD_OFFSET (44*4)

#define FBD_POINTER_MASK (~0x3f)

#define SFBD_TILER_OFFSET (48*4)

#define MFBD_TILER_OFFSET       (14*4)

#define FBD_HIERARCHY_WEIGHTS 8
#define FBD_HIERARCHY_MASK_MASK 0x1fff

#define FBD_TYPE 1

#define HIERARCHY_WEIGHTS 13

#define JOB_HEADER_ID_MAX                 0xffff

#define JOB_SOURCE_ID(status)		(((status) >> 16) & 0xFFFF)
#define JOB_POLYGON_LIST		(0x03)
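
/*
 * Worked example (illustrative only, not driver code): decoding an
 * exception_status word with the helpers above, using the same field
 * split as dump_job_head() and kbase_replay_fault_check() below. The
 * value is invented for illustration:
 *
 *	u32 status = 0x00030358;
 *	u16 source = JOB_SOURCE_ID(status);	// 0x0003 == JOB_POLYGON_LIST
 *	u32 access = (status >> 8) & 0x3;	// 0x3
 *	u32 code   = status & 0xFF;		// 0x58
 *
 * A DATA_INVALID_FAULT whose source decodes to JOB_POLYGON_LIST is the
 * case the replay mechanism looks for.
 */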

struct fragment_job {
	struct job_descriptor_header header;

	u32 x[2];
	union {
		u64 _64;
		u32 _32;
	} fragment_fbd;
};
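
/*
 * Sketch (illustrative, mirroring kbasep_replay_reset_job() below): the
 * width of fragment_fbd is selected by job_descriptor_size, and the
 * resulting word is split into an FBD type bit and a 64-byte-aligned
 * pointer:
 *
 *	u64 fbd = job->job_descriptor_size ?
 *			frag_job->fragment_fbd._64 :
 *			(u64)frag_job->fragment_fbd._32;
 *
 *	if (fbd & FBD_TYPE)
 *		reset multi FBD at (fbd & FBD_POINTER_MASK);
 *	else
 *		reset single FBD at (fbd & FBD_POINTER_MASK);
 */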

static void dump_job_head(struct kbase_context *kctx, char *head_str,
		struct job_descriptor_header *job)
{
#ifdef CONFIG_MALI_DEBUG
	dev_dbg(kctx->kbdev->dev, "%s\n", head_str);
	dev_dbg(kctx->kbdev->dev,
			"addr                  = %p\n"
			"exception_status      = %x (Source ID: 0x%x Access: 0x%x Exception: 0x%x)\n"
			"first_incomplete_task = %x\n"
			"fault_pointer         = %llx\n"
			"job_descriptor_size   = %x\n"
			"job_type              = %x\n"
			"job_barrier           = %x\n"
			"_reserved_01          = %x\n"
			"_reserved_02          = %x\n"
			"_reserved_03          = %x\n"
			"_reserved_04/05       = %x,%x\n"
			"job_index             = %x\n"
			"dependencies          = %x,%x\n",
			job, job->exception_status,
			JOB_SOURCE_ID(job->exception_status),
			(job->exception_status >> 8) & 0x3,
			job->exception_status & 0xFF,
			job->first_incomplete_task,
			job->fault_pointer, job->job_descriptor_size,
			job->job_type, job->job_barrier, job->_reserved_01,
			job->_reserved_02, job->_reserved_03,
			job->_reserved_04, job->_reserved_05,
			job->job_index,
			job->job_dependency_index_1,
			job->job_dependency_index_2);

	if (job->job_descriptor_size)
		dev_dbg(kctx->kbdev->dev, "next               = %llx\n",
				job->next_job._64);
	else
		dev_dbg(kctx->kbdev->dev, "next               = %x\n",
				job->next_job._32);
#endif
}

static int kbasep_replay_reset_sfbd(struct kbase_context *kctx,
		u64 fbd_address, u64 tiler_heap_free,
		u16 hierarchy_mask, u32 default_weight)
{
	struct {
		u32 padding_1[1];
		u32 flags;
		u64 padding_2[2];
		u64 heap_free_address;
		u32 padding[8];
		u32 weights[FBD_HIERARCHY_WEIGHTS];
	} *fbd_tiler;
	struct kbase_vmap_struct map;

	dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);

	fbd_tiler = kbase_vmap(kctx, fbd_address + SFBD_TILER_OFFSET,
			sizeof(*fbd_tiler), &map);
	if (!fbd_tiler) {
		dev_err(kctx->kbdev->dev, "kbasep_replay_reset_sfbd: failed to map fbd\n");
		return -EINVAL;
	}

#ifdef CONFIG_MALI_DEBUG
	dev_dbg(kctx->kbdev->dev,
		"FBD tiler:\n"
		"flags = %x\n"
		"heap_free_address = %llx\n",
		fbd_tiler->flags, fbd_tiler->heap_free_address);
#endif
	if (hierarchy_mask) {
		u32 weights[HIERARCHY_WEIGHTS];
		u16 old_hierarchy_mask = fbd_tiler->flags &
						       FBD_HIERARCHY_MASK_MASK;
		int i, j = 0;

		for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
			if (old_hierarchy_mask & (1 << i)) {
				KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
				weights[i] = fbd_tiler->weights[j++];
			} else {
				weights[i] = default_weight;
			}
		}

		dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x  New hierarchy mask=%x\n",
				old_hierarchy_mask, hierarchy_mask);

		for (i = 0; i < HIERARCHY_WEIGHTS; i++)
			dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n",
					i, weights[i]);

		j = 0;

		for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
			if (hierarchy_mask & (1 << i)) {
				KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);

				dev_dbg(kctx->kbdev->dev, " Writing hierarchy level %02d (%08x) to %d\n",
						i, weights[i], j);

				fbd_tiler->weights[j++] = weights[i];
			}
		}

		for (; j < FBD_HIERARCHY_WEIGHTS; j++)
			fbd_tiler->weights[j] = 0;

		fbd_tiler->flags = hierarchy_mask | (1 << 16);
	}

	fbd_tiler->heap_free_address = tiler_heap_free;

	dev_dbg(kctx->kbdev->dev, "heap_free_address=%llx flags=%x\n",
			fbd_tiler->heap_free_address, fbd_tiler->flags);

	kbase_vunmap(kctx, &map);

	return 0;
}
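
/*
 * Worked example of the hierarchy-weight rewrite above (all values
 * invented for illustration). Suppose the FBD was written with old mask
 * 0x5 (levels 0 and 2) and packed weights {A, B}, and the replay uses
 * new mask 0x3 (levels 0 and 1):
 *
 *	unpack:  weights[] = { A, default, B, default, ... }
 *	repack:  fbd_tiler->weights[] = { A, default, 0, 0, ... }
 *	flags:   0x3 | (1 << 16)
 *
 * Levels dropped from the mask lose their weight, levels added to it get
 * default_weight, and unused packed slots are zeroed.
 */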

static int kbasep_replay_reset_mfbd(struct kbase_context *kctx,
		u64 fbd_address, u64 tiler_heap_free,
		u16 hierarchy_mask, u32 default_weight)
{
	struct kbase_vmap_struct map;
	struct {
		u32 padding_0;
		u32 flags;
		u64 padding_1[2];
		u64 heap_free_address;
		u64 padding_2;
		u32 weights[FBD_HIERARCHY_WEIGHTS];
	} *fbd_tiler;

	dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);

	fbd_tiler = kbase_vmap(kctx, fbd_address + MFBD_TILER_OFFSET,
			sizeof(*fbd_tiler), &map);
	if (!fbd_tiler) {
		dev_err(kctx->kbdev->dev,
			       "kbasep_replay_reset_mfbd: failed to map fbd\n");
		return -EINVAL;
	}

#ifdef CONFIG_MALI_DEBUG
	dev_dbg(kctx->kbdev->dev, "FBD tiler:\n"
			"flags = %x\n"
			"heap_free_address = %llx\n",
			fbd_tiler->flags,
			fbd_tiler->heap_free_address);
#endif
	if (hierarchy_mask) {
		u32 weights[HIERARCHY_WEIGHTS];
		u16 old_hierarchy_mask = (fbd_tiler->flags) &
						       FBD_HIERARCHY_MASK_MASK;
		int i, j = 0;

		for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
			if (old_hierarchy_mask & (1 << i)) {
				KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
				weights[i] = fbd_tiler->weights[j++];
			} else {
				weights[i] = default_weight;
			}
		}

		dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x  New hierarchy mask=%x\n",
				old_hierarchy_mask, hierarchy_mask);

		for (i = 0; i < HIERARCHY_WEIGHTS; i++)
			dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n",
					i, weights[i]);

		j = 0;

		for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
			if (hierarchy_mask & (1 << i)) {
				KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);

				dev_dbg(kctx->kbdev->dev,
				" Writing hierarchy level %02d (%08x) to %d\n",
							     i, weights[i], j);

				fbd_tiler->weights[j++] = weights[i];
			}
		}

		for (; j < FBD_HIERARCHY_WEIGHTS; j++)
			fbd_tiler->weights[j] = 0;

		fbd_tiler->flags = hierarchy_mask | (1 << 16);
	}

	fbd_tiler->heap_free_address = tiler_heap_free;

	kbase_vunmap(kctx, &map);

	return 0;
}

/**
 * @brief Reset the status of an FBD pointed to by a tiler job
 *
 * This performs two functions:
 * - Set the hierarchy mask
 * - Reset the tiler free heap address
 *
 * @param[in] kctx              Context pointer
 * @param[in] job_header        Address of job header to reset.
 * @param[in] tiler_heap_free   The value to reset Tiler Heap Free to
 * @param[in] hierarchy_mask    The hierarchy mask to use
 * @param[in] default_weight    Default hierarchy weight to write when no other
 *                              weight is given in the FBD
 * @param[in] job_64            true if this job is using 64-bit
 *                              descriptors
 *
 * @return 0 on success, error code on failure
 */
static int kbasep_replay_reset_tiler_job(struct kbase_context *kctx,
		u64 job_header, u64 tiler_heap_free,
		u16 hierarchy_mask, u32 default_weight, bool job_64)
{
	struct kbase_vmap_struct map;
	u64 fbd_address;

	if (job_64) {
		u64 *job_ext;

		job_ext = kbase_vmap(kctx,
				job_header + JOB_HEADER_64_FBD_OFFSET,
				sizeof(*job_ext), &map);

		if (!job_ext) {
			dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n");
			return -EINVAL;
		}

		fbd_address = *job_ext;

		kbase_vunmap(kctx, &map);
	} else {
		u32 *job_ext;

		job_ext = kbase_vmap(kctx,
				job_header + JOB_HEADER_32_FBD_OFFSET,
				sizeof(*job_ext), &map);

		if (!job_ext) {
			dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n");
			return -EINVAL;
		}

		fbd_address = *job_ext;

		kbase_vunmap(kctx, &map);
	}

	if (fbd_address & FBD_TYPE) {
		return kbasep_replay_reset_mfbd(kctx,
						fbd_address & FBD_POINTER_MASK,
						tiler_heap_free,
						hierarchy_mask,
						default_weight);
	} else {
		return kbasep_replay_reset_sfbd(kctx,
						fbd_address & FBD_POINTER_MASK,
						tiler_heap_free,
						hierarchy_mask,
						default_weight);
	}
}

/**
 * @brief Reset the status of a job
 *
 * This performs the following functions:
 *
 * - Reset the Job Status field of the job to NOT_STARTED.
 * - Set the Job Type field of any Vertex Job to Null Job.
 * - For any job using an FBD, set the Tiler Heap Free field to the value of
 *   the tiler_heap_free parameter, and set the hierarchy level mask to the
 *   hier_mask parameter.
 * - Offset HW dependencies by the hw_job_id_offset parameter
 * - Set the Perform Job Barrier flag if this job is the first in the chain
 * - Read the address of the next job header
 *
 * @param[in] kctx              Context pointer
 * @param[in,out] job_header    Address of job header to reset. Set to address
 *                              of next job header on exit.
 * @param[in] prev_jc           Previous job chain to link to, if this job is
 *                              the last in the chain.
 * @param[in] hw_job_id_offset  Offset for HW job IDs
 * @param[in] tiler_heap_free   The value to reset Tiler Heap Free to
 * @param[in] hierarchy_mask    The hierarchy mask to use
 * @param[in] default_weight    Default hierarchy weight to write when no other
 *                              weight is given in the FBD
 * @param[in] first_in_chain    true if this job is the first in the chain
 * @param[in] fragment_chain    true if this job is in the fragment chain
 *
 * @return 0 on success, error code on failure
 */
static int kbasep_replay_reset_job(struct kbase_context *kctx,
		u64 *job_header, u64 prev_jc,
		u64 tiler_heap_free, u16 hierarchy_mask,
		u32 default_weight, u16 hw_job_id_offset,
		bool first_in_chain, bool fragment_chain)
{
	struct fragment_job *frag_job;
	struct job_descriptor_header *job;
	u64 new_job_header;
	struct kbase_vmap_struct map;

	frag_job = kbase_vmap(kctx, *job_header, sizeof(*frag_job), &map);
	if (!frag_job) {
		dev_err(kctx->kbdev->dev,
				 "kbasep_replay_reset_job: failed to map jc\n");
		return -EINVAL;
	}
	job = &frag_job->header;

	dump_job_head(kctx, "Job header:", job);

	if (job->exception_status == JOB_NOT_STARTED && !fragment_chain) {
		dev_err(kctx->kbdev->dev, "Job already not started\n");
		goto out_unmap;
	}
	job->exception_status = JOB_NOT_STARTED;

	if (job->job_type == JOB_TYPE_VERTEX)
		job->job_type = JOB_TYPE_NULL;

	if (job->job_type == JOB_TYPE_FUSED) {
		dev_err(kctx->kbdev->dev, "Fused jobs cannot be replayed\n");
		goto out_unmap;
	}

	if (first_in_chain)
		job->job_barrier = 1;

	if ((job->job_dependency_index_1 + hw_job_id_offset) >
			JOB_HEADER_ID_MAX ||
	    (job->job_dependency_index_2 + hw_job_id_offset) >
			JOB_HEADER_ID_MAX ||
	    (job->job_index + hw_job_id_offset) > JOB_HEADER_ID_MAX) {
		dev_err(kctx->kbdev->dev,
			     "Job indices/dependencies out of valid range\n");
		goto out_unmap;
	}

	if (job->job_dependency_index_1)
		job->job_dependency_index_1 += hw_job_id_offset;
	if (job->job_dependency_index_2)
		job->job_dependency_index_2 += hw_job_id_offset;

	job->job_index += hw_job_id_offset;

	if (job->job_descriptor_size) {
		new_job_header = job->next_job._64;
		if (!job->next_job._64)
			job->next_job._64 = prev_jc;
	} else {
		new_job_header = job->next_job._32;
		if (!job->next_job._32)
			job->next_job._32 = prev_jc;
	}
	dump_job_head(kctx, "Updated to:", job);

	if (job->job_type == JOB_TYPE_TILER) {
		bool job_64 = job->job_descriptor_size != 0;

		if (kbasep_replay_reset_tiler_job(kctx, *job_header,
				tiler_heap_free, hierarchy_mask,
				default_weight, job_64) != 0)
			goto out_unmap;

	} else if (job->job_type == JOB_TYPE_FRAGMENT) {
		u64 fbd_address;

		if (job->job_descriptor_size)
			fbd_address = frag_job->fragment_fbd._64;
		else
			fbd_address = (u64)frag_job->fragment_fbd._32;

		if (fbd_address & FBD_TYPE) {
			if (kbasep_replay_reset_mfbd(kctx,
					fbd_address & FBD_POINTER_MASK,
					tiler_heap_free,
					hierarchy_mask,
					default_weight) != 0)
				goto out_unmap;
		} else {
			if (kbasep_replay_reset_sfbd(kctx,
					fbd_address & FBD_POINTER_MASK,
					tiler_heap_free,
					hierarchy_mask,
					default_weight) != 0)
				goto out_unmap;
		}
	}

	kbase_vunmap(kctx, &map);

	*job_header = new_job_header;

	return 0;

out_unmap:
	kbase_vunmap(kctx, &map);
	return -EINVAL;
}
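
/*
 * Example of the index/dependency rebasing above (invented values): if a
 * previously processed chain ended at hw_job_id 3, a chain whose jobs have
 * job_index values {1, 2} and a dependency of job 2 on job 1 is rebased
 * with hw_job_id_offset == 3:
 *
 *	job_index:              1, 2  ->  4, 5
 *	job_dependency_index:      1  ->     4
 *
 * Zero dependency slots are left untouched, since zero means "no
 * dependency" rather than a real job index.
 */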

/**
 * @brief Find the highest job ID in a job chain
 *
 * @param[in] kctx        Context pointer
 * @param[in] jc          Job chain start address
 * @param[out] hw_job_id  Highest job ID in chain
 *
 * @return 0 on success, error code on failure
 */
static int kbasep_replay_find_hw_job_id(struct kbase_context *kctx,
		u64 jc, u16 *hw_job_id)
{
	while (jc) {
		struct job_descriptor_header *job;
		struct kbase_vmap_struct map;

		dev_dbg(kctx->kbdev->dev,
			"kbasep_replay_find_hw_job_id: parsing jc=%llx\n", jc);

		job = kbase_vmap(kctx, jc, sizeof(*job), &map);
		if (!job) {
			dev_err(kctx->kbdev->dev, "failed to map jc\n");

			return -EINVAL;
		}

		if (job->job_index > *hw_job_id)
			*hw_job_id = job->job_index;

		if (job->job_descriptor_size)
			jc = job->next_job._64;
		else
			jc = job->next_job._32;

		kbase_vunmap(kctx, &map);
	}

	return 0;
}

/**
 * @brief Reset the status of a number of jobs
 *
 * This function walks the provided job chain, and calls
 * kbasep_replay_reset_job for each job. It also links the job chain to the
 * provided previous job chain.
 *
 * The function will fail if any of the jobs passed already has a status of
 * NOT_STARTED, unless the fragment chain is being parsed.
 *
 * @param[in] kctx              Context pointer
 * @param[in] jc                Job chain to be processed
 * @param[in] prev_jc           Job chain to be added to. May be 0.
 * @param[in] tiler_heap_free   The value to reset Tiler Heap Free to
 * @param[in] hierarchy_mask    The hierarchy mask to use
 * @param[in] default_weight    Default hierarchy weight to write when no other
 *                              weight is given in the FBD
 * @param[in] hw_job_id_offset  Offset for HW job IDs
 * @param[in] fragment_chain    true if this chain is the fragment chain
 *
 * @return 0 on success, error code otherwise
 */
static int kbasep_replay_parse_jc(struct kbase_context *kctx,
		u64 jc, u64 prev_jc,
		u64 tiler_heap_free, u16 hierarchy_mask,
		u32 default_weight, u16 hw_job_id_offset,
		bool fragment_chain)
{
	bool first_in_chain = true;
	int nr_jobs = 0;

	dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: jc=%llx hw_job_id=%x\n",
			jc, hw_job_id_offset);

	while (jc) {
		dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: parsing jc=%llx\n", jc);

		if (kbasep_replay_reset_job(kctx, &jc, prev_jc,
				tiler_heap_free, hierarchy_mask,
				default_weight, hw_job_id_offset,
				first_in_chain, fragment_chain) != 0)
			return -EINVAL;

		first_in_chain = false;

		nr_jobs++;
		if (fragment_chain &&
		    nr_jobs >= BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT) {
			dev_err(kctx->kbdev->dev,
				"Exceeded maximum number of jobs in fragment chain\n");
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * @brief Reset the status of a replay job, and set up dependencies
 *
 * This performs the actions to allow the replay job to be re-run following
 * completion of the passed dependency.
 *
 * @param[in] katom     The atom to be reset
 * @param[in] dep_atom  The dependency to be attached to the atom
 */
static void kbasep_replay_reset_softjob(struct kbase_jd_atom *katom,
		struct kbase_jd_atom *dep_atom)
{
	katom->status = KBASE_JD_ATOM_STATE_QUEUED;
	kbase_jd_katom_dep_set(&katom->dep[0], dep_atom, BASE_JD_DEP_TYPE_DATA);
	list_add_tail(&katom->dep_item[0], &dep_atom->dep_head[0]);
}
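
/*
 * Resulting dependency graph, as wired up by kbase_replay_process_worker()
 * below (sketch): the fragment atom data-depends on the tiler atom, and the
 * replay soft job is requeued behind the fragment atom, so it is
 * re-evaluated once the replayed chains have completed:
 *
 *	t_katom (tiler)  <--  f_katom (fragment)  <--  replay soft job
 */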

/**
 * @brief Allocate an unused katom
 *
 * This will search the provided context for an unused katom, and will mark it
 * as KBASE_JD_ATOM_STATE_QUEUED. The search runs from the highest atom number
 * downwards; atom number 0 is never returned.
 *
 * If no atoms are available then the function will fail.
 *
 * @param[in] kctx      Context pointer
 * @return An atom ID, or -1 on failure
 */
static int kbasep_allocate_katom(struct kbase_context *kctx)
{
	struct kbase_jd_context *jctx = &kctx->jctx;
	int i;

	for (i = BASE_JD_ATOM_COUNT-1; i > 0; i--) {
		if (jctx->atoms[i].status == KBASE_JD_ATOM_STATE_UNUSED) {
			jctx->atoms[i].status = KBASE_JD_ATOM_STATE_QUEUED;
			dev_dbg(kctx->kbdev->dev,
				  "kbasep_allocate_katom: Allocated atom %d\n",
									    i);
			return i;
		}
	}

	return -1;
}

/**
 * @brief Release a katom
 *
 * This will mark the provided atom as available, and remove any dependencies.
 *
 * For use on error path.
 *
 * @param[in] kctx      Context pointer
 * @param[in] atom_id   ID of atom to release
 */
static void kbasep_release_katom(struct kbase_context *kctx, int atom_id)
{
	struct kbase_jd_context *jctx = &kctx->jctx;

	dev_dbg(kctx->kbdev->dev, "kbasep_release_katom: Released atom %d\n",
			atom_id);

	while (!list_empty(&jctx->atoms[atom_id].dep_head[0]))
		list_del(jctx->atoms[atom_id].dep_head[0].next);

	while (!list_empty(&jctx->atoms[atom_id].dep_head[1]))
		list_del(jctx->atoms[atom_id].dep_head[1].next);

	jctx->atoms[atom_id].status = KBASE_JD_ATOM_STATE_UNUSED;
}

static void kbasep_replay_create_atom(struct kbase_context *kctx,
				      struct base_jd_atom_v2 *atom,
				      int atom_nr,
				      base_jd_prio prio)
{
	atom->nr_extres = 0;
	atom->extres_list.value = NULL;
	atom->device_nr = 0;
	atom->prio = prio;
	atom->atom_number = atom_nr;

	base_jd_atom_dep_set(&atom->pre_dep[0], 0, BASE_JD_DEP_TYPE_INVALID);
	base_jd_atom_dep_set(&atom->pre_dep[1], 0, BASE_JD_DEP_TYPE_INVALID);

	atom->udata.blob[0] = 0;
	atom->udata.blob[1] = 0;
}

/**
 * @brief Create two atoms for the purpose of replaying jobs
 *
 * Two atoms are allocated and created. The jc pointer is not set at this
 * stage. The second atom has a dependency on the first. The remaining fields
 * are set up as follows:
 *
 * - No external resources. Any required external resources will be held by the
 *   replay atom.
 * - device_nr is set to 0. This is not relevant as
 *   BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set.
 * - Priority is inherited from the replay job.
 *
 * @param[in]  kctx        Context pointer
 * @param[out] t_atom      Atom to use for tiler jobs
 * @param[out] f_atom      Atom to use for fragment jobs
 * @param[in]  prio        Priority of new atom (inherited from replay soft
 *                         job)
 * @return 0 on success, error code on failure
 */
static int kbasep_replay_create_atoms(struct kbase_context *kctx,
		struct base_jd_atom_v2 *t_atom,
		struct base_jd_atom_v2 *f_atom,
		base_jd_prio prio)
{
	int t_atom_nr, f_atom_nr;

	t_atom_nr = kbasep_allocate_katom(kctx);
	if (t_atom_nr < 0) {
		dev_err(kctx->kbdev->dev, "Failed to allocate katom\n");
		return -EINVAL;
	}

	f_atom_nr = kbasep_allocate_katom(kctx);
	if (f_atom_nr < 0) {
		dev_err(kctx->kbdev->dev, "Failed to allocate katom\n");
		kbasep_release_katom(kctx, t_atom_nr);
		return -EINVAL;
	}

	kbasep_replay_create_atom(kctx, t_atom, t_atom_nr, prio);
	kbasep_replay_create_atom(kctx, f_atom, f_atom_nr, prio);

	base_jd_atom_dep_set(&f_atom->pre_dep[0], t_atom_nr,
			BASE_JD_DEP_TYPE_DATA);

	return 0;
}

#ifdef CONFIG_MALI_DEBUG
static void payload_dump(struct kbase_context *kctx,
		base_jd_replay_payload *payload)
{
	u64 next;

	dev_dbg(kctx->kbdev->dev, "Tiler jc list:\n");
	next = payload->tiler_jc_list;

	while (next) {
		struct kbase_vmap_struct map;
		base_jd_replay_jc *jc_struct;

		jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &map);

		if (!jc_struct)
			return;

		dev_dbg(kctx->kbdev->dev, "* jc_struct=%p jc=%llx next=%llx\n",
				jc_struct, jc_struct->jc, jc_struct->next);

		next = jc_struct->next;

		kbase_vunmap(kctx, &map);
	}
}
#endif
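
/*
 * Shape of the list that payload_dump() above and
 * kbasep_replay_parse_payload() below walk (sketch; base_jd_replay_jc is
 * defined in the base UK headers):
 *
 *	payload->tiler_jc_list
 *	    -> { .jc = <tiler chain 0>, .next = <node 1> }
 *	    -> { .jc = <tiler chain 1>, .next = 0 }
 *
 * Each node carries one tiler job chain; a zero .next terminates the list.
 */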

/**
 * @brief Parse a base_jd_replay_payload provided by userspace
 *
 * This will read the payload from userspace, and parse the job chains.
 *
 * @param[in] kctx         Context pointer
 * @param[in] replay_atom  Replay soft job atom
 * @param[in] t_atom       Atom to use for tiler jobs
 * @param[in] f_atom       Atom to use for fragment jobs
 * @return 0 on success, error code on failure
 */
static int kbasep_replay_parse_payload(struct kbase_context *kctx,
					      struct kbase_jd_atom *replay_atom,
					      struct base_jd_atom_v2 *t_atom,
					      struct base_jd_atom_v2 *f_atom)
{
	base_jd_replay_payload *payload = NULL;
	u64 next;
	u64 prev_jc = 0;
	u16 hw_job_id_offset = 0;
	int ret = -EINVAL;
	struct kbase_vmap_struct map;

	dev_dbg(kctx->kbdev->dev,
		"kbasep_replay_parse_payload: replay_atom->jc = %llx sizeof(*payload) = %zu\n",
		replay_atom->jc, sizeof(*payload));

	payload = kbase_vmap(kctx, replay_atom->jc, sizeof(*payload), &map);
	if (!payload) {
		dev_err(kctx->kbdev->dev, "kbasep_replay_parse_payload: failed to map payload into kernel space\n");
		return -EINVAL;
	}

#ifdef BASE_LEGACY_UK10_2_SUPPORT
	if (KBASE_API_VERSION(10, 3) > replay_atom->kctx->api_version) {
		base_jd_replay_payload_uk10_2 *payload_uk10_2;
		u16 tiler_core_req;
		u16 fragment_core_req;

		payload_uk10_2 = (base_jd_replay_payload_uk10_2 *) payload;
		memcpy(&tiler_core_req, &payload_uk10_2->tiler_core_req,
				sizeof(tiler_core_req));
		memcpy(&fragment_core_req, &payload_uk10_2->fragment_core_req,
				sizeof(fragment_core_req));
		payload->tiler_core_req = (u32)(tiler_core_req & 0x7fff);
		payload->fragment_core_req = (u32)(fragment_core_req & 0x7fff);
	}
#endif /* BASE_LEGACY_UK10_2_SUPPORT */

#ifdef CONFIG_MALI_DEBUG
	dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: payload=%p\n", payload);
	dev_dbg(kctx->kbdev->dev, "Payload structure:\n"
				  "tiler_jc_list            = %llx\n"
				  "fragment_jc              = %llx\n"
				  "tiler_heap_free          = %llx\n"
				  "fragment_hierarchy_mask  = %x\n"
				  "tiler_hierarchy_mask     = %x\n"
				  "hierarchy_default_weight = %x\n"
				  "tiler_core_req           = %x\n"
				  "fragment_core_req        = %x\n",
				  payload->tiler_jc_list,
				  payload->fragment_jc,
				  payload->tiler_heap_free,
				  payload->fragment_hierarchy_mask,
				  payload->tiler_hierarchy_mask,
				  payload->hierarchy_default_weight,
				  payload->tiler_core_req,
				  payload->fragment_core_req);
	payload_dump(kctx, payload);
#endif
	t_atom->core_req = payload->tiler_core_req | BASEP_JD_REQ_EVENT_NEVER;
	f_atom->core_req = payload->fragment_core_req | BASEP_JD_REQ_EVENT_NEVER;

	/* Sanity check core requirements */
	if ((t_atom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_T ||
	    (f_atom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_FS ||
	     t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES ||
	     f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {

		int t_atom_type = t_atom->core_req & BASE_JD_REQ_ATOM_TYPE &
				~BASE_JD_REQ_COHERENT_GROUP;
		int f_atom_type = f_atom->core_req & BASE_JD_REQ_ATOM_TYPE &
				~BASE_JD_REQ_COHERENT_GROUP &
				~BASE_JD_REQ_FS_AFBC;
		int t_has_ex_res = t_atom->core_req &
				BASE_JD_REQ_EXTERNAL_RESOURCES;
		int f_has_ex_res = f_atom->core_req &
				BASE_JD_REQ_EXTERNAL_RESOURCES;

		if (t_atom_type != BASE_JD_REQ_T) {
			dev_err(kctx->kbdev->dev, "Invalid core requirement: Tiler atom not a tiler job. Was: 0x%x Expected: 0x%x\n",
			    t_atom_type, BASE_JD_REQ_T);
		}
		if (f_atom_type != BASE_JD_REQ_FS) {
			dev_err(kctx->kbdev->dev, "Invalid core requirement: Fragment shader atom not a fragment shader. Was: 0x%x Expected: 0x%x\n",
			    f_atom_type, BASE_JD_REQ_FS);
		}
		if (t_has_ex_res) {
			dev_err(kctx->kbdev->dev, "Invalid core requirement: Tiler atom has external resources.\n");
		}
		if (f_has_ex_res) {
			dev_err(kctx->kbdev->dev, "Invalid core requirement: Fragment shader atom has external resources.\n");
		}

		goto out;
	}

	/* Process tiler job chains */
	next = payload->tiler_jc_list;
	if (!next) {
		dev_err(kctx->kbdev->dev, "Invalid tiler JC list\n");
		goto out;
	}

	while (next) {
		base_jd_replay_jc *jc_struct;
		struct kbase_vmap_struct jc_map;
		u64 jc;

		jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &jc_map);

		if (!jc_struct) {
			dev_err(kctx->kbdev->dev, "Failed to map jc struct\n");
			goto out;
		}

		jc = jc_struct->jc;
		next = jc_struct->next;
		if (next)
			jc_struct->jc = 0;

		kbase_vunmap(kctx, &jc_map);

		if (jc) {
			u16 max_hw_job_id = 0;

			if (kbasep_replay_find_hw_job_id(kctx, jc,
					&max_hw_job_id) != 0)
				goto out;

			if (kbasep_replay_parse_jc(kctx, jc, prev_jc,
					payload->tiler_heap_free,
					payload->tiler_hierarchy_mask,
					payload->hierarchy_default_weight,
					hw_job_id_offset, false) != 0) {
				goto out;
			}

			hw_job_id_offset += max_hw_job_id;

			prev_jc = jc;
		}
	}
	t_atom->jc = prev_jc;

	/* Process fragment job chain */
	f_atom->jc = payload->fragment_jc;
	if (kbasep_replay_parse_jc(kctx, payload->fragment_jc, 0,
			payload->tiler_heap_free,
			payload->fragment_hierarchy_mask,
			payload->hierarchy_default_weight, 0,
			true) != 0) {
		goto out;
	}

	if (!t_atom->jc || !f_atom->jc) {
		dev_err(kctx->kbdev->dev, "Invalid payload\n");
		goto out;
	}

	dev_dbg(kctx->kbdev->dev, "t_atom->jc=%llx f_atom->jc=%llx\n",
			t_atom->jc, f_atom->jc);
	ret = 0;

out:
	kbase_vunmap(kctx, &map);

	return ret;
}

static void kbase_replay_process_worker(struct work_struct *data)
{
	struct kbase_jd_atom *katom;
	struct kbase_context *kctx;
	struct kbase_jd_context *jctx;
	bool need_to_try_schedule_context = false;

	struct base_jd_atom_v2 t_atom, f_atom;
	struct kbase_jd_atom *t_katom, *f_katom;
	base_jd_prio atom_prio;

	katom = container_of(data, struct kbase_jd_atom, work);
	kctx = katom->kctx;
	jctx = &kctx->jctx;

	mutex_lock(&jctx->lock);

	atom_prio = kbasep_js_sched_prio_to_atom_prio(katom->sched_priority);

	if (kbasep_replay_create_atoms(
			kctx, &t_atom, &f_atom, atom_prio) != 0) {
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
		goto out;
	}

	t_katom = &jctx->atoms[t_atom.atom_number];
	f_katom = &jctx->atoms[f_atom.atom_number];

	if (kbasep_replay_parse_payload(kctx, katom, &t_atom, &f_atom) != 0) {
		kbasep_release_katom(kctx, t_atom.atom_number);
		kbasep_release_katom(kctx, f_atom.atom_number);
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
		goto out;
	}

	kbasep_replay_reset_softjob(katom, f_katom);

	need_to_try_schedule_context |= jd_submit_atom(kctx, &t_atom, t_katom);
	if (t_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
		dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n");
		kbasep_release_katom(kctx, f_atom.atom_number);
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
		goto out;
	}
	need_to_try_schedule_context |= jd_submit_atom(kctx, &f_atom, f_katom);
	if (f_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
		dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n");
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
		goto out;
	}

	katom->event_code = BASE_JD_EVENT_DONE;

out:
	if (katom->event_code != BASE_JD_EVENT_DONE) {
		kbase_disjoint_state_down(kctx->kbdev);

		need_to_try_schedule_context |= jd_done_nolock(katom, NULL);
	}

	if (need_to_try_schedule_context)
		kbase_js_sched_all(kctx->kbdev);

	mutex_unlock(&jctx->lock);
}
982 
983 /**
984  * @brief Check job replay fault
985  *
986  * This will read the job payload, checks fault type and source, then decides
987  * whether replay is required.
988  *
989  * @param[in] katom       The atom to be processed
990  * @return  true (success) if replay required or false on failure.
991  */
static bool kbase_replay_fault_check(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	struct device *dev = kctx->kbdev->dev;
	base_jd_replay_payload *payload;
	u64 job_header;
	u64 job_loop_detect;
	struct job_descriptor_header *job;
	struct kbase_vmap_struct job_map;
	struct kbase_vmap_struct map;
	bool err = false;

	/* Replay job if fault is of type BASE_JD_EVENT_JOB_WRITE_FAULT or
	 * if force_replay is enabled.
	 */
	if (BASE_JD_EVENT_TERMINATED == katom->event_code) {
		return false;
	} else if (BASE_JD_EVENT_JOB_WRITE_FAULT == katom->event_code) {
		return true;
	} else if (BASE_JD_EVENT_FORCE_REPLAY == katom->event_code) {
		katom->event_code = BASE_JD_EVENT_DATA_INVALID_FAULT;
		return true;
	} else if (BASE_JD_EVENT_DATA_INVALID_FAULT != katom->event_code) {
		/* No replay for faults of type other than
		 * BASE_JD_EVENT_DATA_INVALID_FAULT.
		 */
		return false;
	}

	/* Job fault is BASE_JD_EVENT_DATA_INVALID_FAULT, now scan fragment jc
	 * to find out whether the source of exception is POLYGON_LIST. Replay
	 * is required if the source of fault is POLYGON_LIST.
	 */
	payload = kbase_vmap(kctx, katom->jc, sizeof(*payload), &map);
	if (!payload) {
		dev_err(dev, "kbase_replay_fault_check: failed to map payload.\n");
		return false;
	}

#ifdef CONFIG_MALI_DEBUG
	dev_dbg(dev, "kbase_replay_fault_check: payload=%p\n", payload);
	dev_dbg(dev, "\nPayload structure:\n"
		     "fragment_jc              = 0x%llx\n"
		     "fragment_hierarchy_mask  = 0x%x\n"
		     "fragment_core_req        = 0x%x\n",
		     payload->fragment_jc,
		     payload->fragment_hierarchy_mask,
		     payload->fragment_core_req);
#endif
	/* Process fragment job chain */
	job_header      = (u64) payload->fragment_jc;
	job_loop_detect = job_header;
	while (job_header) {
		job = kbase_vmap(kctx, job_header, sizeof(*job), &job_map);
		if (!job) {
			dev_err(dev, "failed to map jc\n");
			/* unmap payload */
			kbase_vunmap(kctx, &map);
			return false;
		}

		dump_job_head(kctx, "\njob_head structure:\n", job);

		/* Replay only when the polygon list reader caused the
		 * DATA_INVALID_FAULT */
		if ((BASE_JD_EVENT_DATA_INVALID_FAULT == katom->event_code) &&
		   (JOB_POLYGON_LIST == JOB_SOURCE_ID(job->exception_status))) {
			err = true;
			kbase_vunmap(kctx, &job_map);
			break;
		}

		/* Move on to next fragment job in the list */
		if (job->job_descriptor_size)
			job_header = job->next_job._64;
		else
			job_header = job->next_job._32;

		kbase_vunmap(kctx, &job_map);

		/* Job chain loop detected */
		if (job_header == job_loop_detect)
			break;
	}

	/* unmap payload */
	kbase_vunmap(kctx, &map);

	return err;
}
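
/*
 * Summary of the decision above (sketch):
 *
 *	TERMINATED		-> no replay
 *	JOB_WRITE_FAULT		-> replay
 *	FORCE_REPLAY		-> replay, recoded as DATA_INVALID_FAULT
 *	DATA_INVALID_FAULT	-> replay only if a job in the fragment
 *				   chain faulted with source POLYGON_LIST
 *	anything else		-> no replay
 */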


/**
 * @brief Process a replay job
 *
 * Called from kbase_process_soft_job.
 *
 * On exit, if the job has completed, katom->event_code will have been updated.
 * If the job has not completed, and is replaying jobs, then the atom status
 * will have been reset to KBASE_JD_ATOM_STATE_QUEUED.
 *
 * @param[in] katom  The atom to be processed
 * @return           false if the atom has completed
 *                   true if the atom is replaying jobs
 */
bool kbase_replay_process(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	struct kbase_device *kbdev = kctx->kbdev;

	/* Don't replay this atom if these issues are not present in the
	 * hardware */
	if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11020) &&
			!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11024)) {
		dev_dbg(kbdev->dev, "Hardware does not need replay workaround\n");

		/* Signal failure to userspace */
		katom->event_code = BASE_JD_EVENT_JOB_INVALID;

		return false;
	}

	if (katom->event_code == BASE_JD_EVENT_DONE) {
		dev_dbg(kbdev->dev, "Previous job succeeded - not replaying\n");

		if (katom->retry_count)
			kbase_disjoint_state_down(kbdev);

		return false;
	}

	if (kbase_ctx_flag(kctx, KCTX_DYING)) {
		dev_dbg(kbdev->dev, "Not replaying; context is dying\n");

		if (katom->retry_count)
			kbase_disjoint_state_down(kbdev);

		return false;
	}

	/* Check job exception type and source before replaying. */
	if (!kbase_replay_fault_check(katom)) {
		dev_dbg(kbdev->dev,
			"Replay cancelled on event %x\n", katom->event_code);
		/* katom->event_code is already set to the failure code of the
		 * previous job.
		 */
		return false;
	}

	dev_warn(kbdev->dev, "Replaying jobs retry=%d\n",
			katom->retry_count);

	katom->retry_count++;

	if (katom->retry_count > BASEP_JD_REPLAY_LIMIT) {
		dev_err(kbdev->dev, "Replay exceeded limit - failing jobs\n");

		kbase_disjoint_state_down(kbdev);

		/* katom->event_code is already set to the failure code of the
		 * previous job.
		 */
		return false;
	}

	/* Only enter the disjoint state once for the whole time the replay
	 * is ongoing.
	 */
	if (katom->retry_count == 1)
		kbase_disjoint_state_up(kbdev);

	INIT_WORK(&katom->work, kbase_replay_process_worker);
	queue_work(kctx->event_workq, &katom->work);

	return true;
}