// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 *
 * author:
 *	Alpha Lin, alpha.lin@rock-chips.com
 *	Randy Li, randy.li@rock-chips.com
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/regmap.h>
#include <linux/proc_fs.h>
#include <linux/nospec.h>
#include <soc/rockchip/pm_domains.h>

#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_iommu.h"
#include "hack/mpp_hack_px30.h"

#define VEPU2_DRIVER_NAME		"mpp_vepu2"

#define	VEPU2_SESSION_MAX_BUFFERS		20
/* The maximum number of registers across all hardware versions */
#define VEPU2_REG_NUM				184
#define VEPU2_REG_HW_ID_INDEX		-1 /* INVALID */
#define VEPU2_REG_START_INDEX			0
#define VEPU2_REG_END_INDEX			183

#define VEPU2_REG_ENC_EN			0x19c
#define VEPU2_REG_ENC_EN_INDEX			(103)
#define VEPU2_ENC_START				BIT(0)

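/*
 * The encoder control register (reg 103) packs the stream format in
 * bits [5:4] and the frame width/height in 16-pixel macroblock units.
 */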
#define VEPU2_GET_FORMAT(x)			(((x) >> 4) & 0x3)
#define VEPU2_FORMAT_MASK			(0x30)
#define VEPU2_GET_WIDTH(x)			(((x >> 8) & 0x1ff) << 4)
#define VEPU2_GET_HEIGHT(x)			(((x >> 20) & 0x1ff) << 4)

#define VEPU2_FMT_RESERVED			(0)
#define VEPU2_FMT_VP8E				(1)
#define VEPU2_FMT_JPEGE				(2)
#define VEPU2_FMT_H264E				(3)

#define VEPU2_REG_MB_CTRL			0x1a0
#define VEPU2_REG_MB_CTRL_INDEX			(104)

#define VEPU2_REG_INT				0x1b4
#define VEPU2_REG_INT_INDEX			(109)
#define VEPU2_MV_SAD_WR_EN			BIT(24)
#define VEPU2_ROCON_WRITE_DIS			BIT(20)
#define VEPU2_INT_SLICE_EN			BIT(16)
#define VEPU2_CLOCK_GATE_EN			BIT(12)
#define VEPU2_INT_TIMEOUT_EN			BIT(10)
#define VEPU2_INT_CLEAR				BIT(9)
#define VEPU2_IRQ_DIS				BIT(8)
#define VEPU2_INT_TIMEOUT			BIT(6)
#define VEPU2_INT_BUF_FULL			BIT(5)
#define VEPU2_INT_BUS_ERROR			BIT(4)
#define VEPU2_INT_SLICE				BIT(2)
#define VEPU2_INT_RDY				BIT(1)
#define VEPU2_INT_RAW				BIT(0)

#define RKVPUE2_REG_DMV_4P_1P(i)		(0x1e0 + ((i) << 4))
#define RKVPUE2_REG_DMV_4P_1P_INDEX(i)		(120 + (i))

#define VEPU2_REG_CLR_CACHE_BASE		0xc10

#define to_vepu_task(task)		\
		container_of(task, struct vepu_task, mpp_task)
#define to_vepu_dev(dev)		\
		container_of(dev, struct vepu_dev, mpp)

struct vepu_task {
	struct mpp_task mpp_task;

	enum MPP_CLOCK_MODE clk_mode;
	u32 reg[VEPU2_REG_NUM];

	struct reg_offset_info off_inf;
	u32 irq_status;
	/* req for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
	/* image info */
	u32 width;
	u32 height;
	u32 pixels;
};

struct vepu_session_priv {
	struct rw_semaphore rw_sem;
	/* codec info from user */
	struct {
		/* show mode */
		u32 flag;
		/* item data */
		u64 val;
	} codec_info[ENC_INFO_BUTT];
};

struct vepu_dev {
	struct mpp_dev mpp;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
	u32 default_max_load;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	struct reset_control *rst_a;
	struct reset_control *rst_h;
	/* for the CCU (central control unit) */
	struct vepu_ccu *ccu;
	struct list_head core_link;
	u32 disable_work;
};

struct vepu_ccu {
	u32 core_num;
	/* lock for core attach */
	struct mutex lock;
	struct list_head core_list;
	struct mpp_dev *main_core;
};

static struct mpp_hw_info vepu_v2_hw_info = {
	.reg_num = VEPU2_REG_NUM,
	.reg_id = VEPU2_REG_HW_ID_INDEX,
	.reg_start = VEPU2_REG_START_INDEX,
	.reg_end = VEPU2_REG_END_INDEX,
	.reg_en = VEPU2_REG_ENC_EN_INDEX,
};

/*
 * Translation tables: per-format lists of register indices whose values
 * are dma-buf file descriptors that must be converted to device addresses.
 */
static const u16 trans_tbl_default[] = {
	48, 49, 50, 56, 57, 63, 64, 77, 78, 81
};

static const u16 trans_tbl_vp8e[] = {
	27, 44, 45, 48, 49, 50, 56, 57, 63, 64,
	76, 77, 78, 80, 81, 106, 108,
};

static struct mpp_trans_info trans_rk_vepu2[] = {
	[VEPU2_FMT_RESERVED] = {
		.count = 0,
		.table = NULL,
	},
	[VEPU2_FMT_VP8E] = {
		.count = ARRAY_SIZE(trans_tbl_vp8e),
		.table = trans_tbl_vp8e,
	},
	[VEPU2_FMT_JPEGE] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VEPU2_FMT_H264E] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
};

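/*
 * Convert the dma-buf fds carried in the task registers into device
 * addresses for the format in use, then apply any user-supplied
 * address offsets recorded in off_inf.
 */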
static int vepu_process_reg_fd(struct mpp_session *session,
			       struct vepu_task *task,
			       struct mpp_task_msgs *msgs)
{
	int ret;
	int fmt = VEPU2_GET_FORMAT(task->reg[VEPU2_REG_ENC_EN_INDEX]);

	ret = mpp_translate_reg_address(session, &task->mpp_task,
					fmt, task->reg, &task->off_inf);
	if (ret)
		return ret;

	mpp_translate_reg_offset_info(&task->mpp_task,
				      &task->off_inf, task->reg);

	return 0;
}

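/*
 * Walk the ioctl message list: copy SET_REG_WRITE payloads into the
 * task register image, remember SET_REG_READ ranges for readback after
 * the task finishes, and collect address-offset patches.
 */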
static int vepu_extract_task_msg(struct vepu_task *task,
				 struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;
	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;

	for (i = 0; i < msgs->req_cnt; i++) {
		u32 off_s, off_e;

		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			if (copy_from_user((u8 *)task->reg + req->offset,
					   req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			memcpy(&task->r_reqs[task->r_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}

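/*
 * Allocate and initialize a vepu task for one encode job: pull in the
 * user requests, translate buffer fds, and cache the frame geometry
 * that the load-based clock scaling uses later.
 */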
static void *vepu_alloc_task(struct mpp_session *session,
			     struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct vepu_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = vepu_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process fds carried in the registers */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = vepu_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	task->clk_mode = CLK_MODE_NORMAL;
	/* get resolution info */
	task->width = VEPU2_GET_WIDTH(task->reg[VEPU2_REG_ENC_EN_INDEX]);
	task->height = VEPU2_GET_HEIGHT(task->reg[VEPU2_REG_ENC_EN_INDEX]);
	task->pixels = task->width * task->height;
	mpp_debug(DEBUG_TASK_INFO, "width=%d, height=%d\n", task->width, task->height);

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}

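/*
 * Pick the least loaded core on a multi-core (CCU) system: skip cores
 * with work disabled, prefer the core with the fewest queued tasks,
 * and fall back to the one that has processed the fewest tasks so far.
 */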
static struct vepu_dev *vepu_core_balance(struct vepu_ccu *ccu)
{
	struct vepu_dev *enc;
	struct vepu_dev *core = NULL, *n;

	mpp_debug_enter();

	mutex_lock(&ccu->lock);
	enc = list_first_entry(&ccu->core_list, struct vepu_dev, core_link);
	list_for_each_entry_safe(core, n, &ccu->core_list, core_link) {
		mpp_debug(DEBUG_DEVICE, "%s, disable_work=%d, task_count=%d, task_index=%d\n",
			  dev_name(core->mpp.dev), core->disable_work,
			  atomic_read(&core->mpp.task_count), atomic_read(&core->mpp.task_index));
		/* skip any core (other than the main core) that is disabled */
		if (core->disable_work)
			continue;
		/* prefer the core with fewer tasks in its queue */
		if (atomic_read(&core->mpp.task_count) < atomic_read(&enc->mpp.task_count)) {
			enc = core;
			break;
		}
		/* otherwise prefer the core that has completed fewer tasks */
		if (atomic_read(&core->mpp.task_index) < atomic_read(&enc->mpp.task_index))
			enc = core;
	}
	mutex_unlock(&ccu->lock);

	mpp_debug_leave();

	return enc;
}

static void *vepu_ccu_alloc_task(struct mpp_session *session,
				 struct mpp_task_msgs *msgs)
{
	struct vepu_dev *enc = to_vepu_dev(session->mpp);

	/* on a multi-core system, choose a core for the current task */
	if (enc->ccu) {
		enc = vepu_core_balance(enc->ccu);
		session->mpp = &enc->mpp;
	}

	return vepu_alloc_task(session, msgs);
}

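/*
 * Program one task into the hardware: clear the cache, write the
 * format bits first, then flush every user write request into the
 * register file, and finally kick the encoder with the start bit.
 */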
static int vepu_run(struct mpp_dev *mpp,
		    struct mpp_task *mpp_task)
{
	u32 i;
	u32 reg_en;
	struct vepu_task *task = to_vepu_task(mpp_task);

	mpp_debug_enter();

	/* clear cache */
	mpp_write_relaxed(mpp, VEPU2_REG_CLR_CACHE_BASE, 1);

	reg_en = mpp_task->hw_info->reg_en;
	/* First, write the correct encoder format */
	mpp_write_relaxed(mpp, VEPU2_REG_ENC_EN,
			  task->reg[reg_en] & VEPU2_FORMAT_MASK);
	/* Second, flush the other registers */
	for (i = 0; i < task->w_req_cnt; i++) {
		struct mpp_request *req = &task->w_reqs[i];
		int s = req->offset / sizeof(u32);
		int e = s + req->size / sizeof(u32);

		mpp_write_req(mpp, task->reg, s, e, reg_en);
	}
	/* init current task */
	mpp->cur_task = mpp_task;
	/* Last, make sure all writes have landed, then start the encoder */
	wmb();
	mpp_write(mpp, VEPU2_REG_ENC_EN,
		  task->reg[reg_en] | VEPU2_ENC_START);

	mpp_debug_leave();

	return 0;
}

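/*
 * Hard IRQ half: latch and clear the interrupt status, and wake the
 * threaded handler only when the raw interrupt bit is set.
 */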
static int vepu_irq(struct mpp_dev *mpp)
{
	mpp->irq_status = mpp_read(mpp, VEPU2_REG_INT);
	if (!(mpp->irq_status & VEPU2_INT_RAW))
		return IRQ_NONE;

	mpp_write(mpp, VEPU2_REG_INT, 0);

	return IRQ_WAKE_THREAD;
}

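/*
 * Threaded half: record the status on the current task, request a
 * block reset if an error bit (timeout, buffer full, bus error) is
 * set, and complete the task.
 */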
static int vepu_isr(struct mpp_dev *mpp)
{
	u32 err_mask;
	struct vepu_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_vepu_task(mpp_task);
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n",
		  task->irq_status);

	err_mask = VEPU2_INT_TIMEOUT
		| VEPU2_INT_BUF_FULL
		| VEPU2_INT_BUS_ERROR;

	if (err_mask & task->irq_status)
		atomic_inc(&mpp->reset_request);

	mpp_task_finish(mpp_task->session, mpp_task);

	mpp_debug_leave();

	return IRQ_HANDLED;
}

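/*
 * Post-run step: read back the registers the session asked for and
 * restore the saved interrupt status into the register image.
 */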
static int vepu_finish(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task)
{
	u32 i;
	u32 s, e;
	struct mpp_request *req;
	struct vepu_task *task = to_vepu_task(mpp_task);

	mpp_debug_enter();

	/* read registers back after running */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];
		s = req->offset / sizeof(u32);
		e = s + req->size / sizeof(u32);
		mpp_read_req(mpp, task->reg, s, e);
	}
	/* revert hack for irq status */
	task->reg[VEPU2_REG_INT_INDEX] = task->irq_status;

	mpp_debug_leave();

	return 0;
}

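/*
 * Copy the requested register ranges back to userspace once the task
 * has finished.
 */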
static int vepu_result(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task,
		       struct mpp_task_msgs *msgs)
{
	u32 i;
	struct mpp_request *req;
	struct vepu_task *task = to_vepu_task(mpp_task);

	/* FIXME may overflow the kernel */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];

		if (copy_to_user(req->data,
				 (u8 *)task->reg + req->offset,
				 req->size)) {
			mpp_err("copy_to_user reg fail\n");
			return -EIO;
		}
	}

	return 0;
}

static int vepu_free_task(struct mpp_session *session,
			  struct mpp_task *mpp_task)
{
	struct vepu_task *task = to_vepu_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	kfree(task);

	return 0;
}

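/*
 * Driver-private ioctl: MPP_CMD_SEND_CODEC_INFO stores per-session
 * codec statistics (clamped and index-sanitized) that the procfs
 * "sessions-info" dump prints later.
 */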
static int vepu_control(struct mpp_session *session, struct mpp_request *req)
{
	switch (req->cmd) {
	case MPP_CMD_SEND_CODEC_INFO: {
		int i;
		int cnt;
		struct codec_info_elem elem;
		struct vepu_session_priv *priv;

		if (!session || !session->priv) {
			mpp_err("session info null\n");
			return -EINVAL;
		}
		priv = session->priv;

		cnt = req->size / sizeof(elem);
		cnt = (cnt > ENC_INFO_BUTT) ? ENC_INFO_BUTT : cnt;
		mpp_debug(DEBUG_IOCTL, "codec info count %d\n", cnt);
		for (i = 0; i < cnt; i++) {
			if (copy_from_user(&elem, req->data + i * sizeof(elem), sizeof(elem))) {
				mpp_err("copy_from_user failed\n");
				continue;
			}
			if (elem.type > ENC_INFO_BASE && elem.type < ENC_INFO_BUTT &&
			    elem.flag > CODEC_INFO_FLAG_NULL && elem.flag < CODEC_INFO_FLAG_BUTT) {
				elem.type = array_index_nospec(elem.type, ENC_INFO_BUTT);
				priv->codec_info[elem.type].flag = elem.flag;
				priv->codec_info[elem.type].val = elem.data;
			} else {
				mpp_err("codec info invalid, type %d, flag %d\n",
					elem.type, elem.flag);
			}
		}
	} break;
	default: {
		mpp_err("unknown mpp ioctl cmd %x\n", req->cmd);
	} break;
	}

	return 0;
}

static int vepu_free_session(struct mpp_session *session)
{
	if (session && session->priv) {
		kfree(session->priv);
		session->priv = NULL;
	}

	return 0;
}

static int vepu_init_session(struct mpp_session *session)
{
	struct vepu_session_priv *priv;

	if (!session) {
		mpp_err("session is null\n");
		return -EINVAL;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	init_rwsem(&priv->rw_sem);
	session->priv = priv;

	return 0;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
static int vepu_procfs_remove(struct mpp_dev *mpp)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);

	if (enc->procfs) {
		proc_remove(enc->procfs);
		enc->procfs = NULL;
	}

	return 0;
}

static int vepu_dump_session(struct mpp_session *session, struct seq_file *seq)
{
	int i;
	struct vepu_session_priv *priv = session->priv;

	down_read(&priv->rw_sem);
	/* item name */
	seq_puts(seq, "------------------------------------------------------");
	seq_puts(seq, "------------------------------------------------------\n");
	seq_printf(seq, "|%8s|", (const char *)"session");
	seq_printf(seq, "%8s|", (const char *)"device");
	for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
		bool show = priv->codec_info[i].flag;

		if (show)
			seq_printf(seq, "%8s|", enc_info_item_name[i]);
	}
	seq_puts(seq, "\n");
	/* item data */
	seq_printf(seq, "|%8p|", session);
	seq_printf(seq, "%8s|", mpp_device_name[session->device_type]);
	for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
		u32 flag = priv->codec_info[i].flag;

		if (!flag)
			continue;
		if (flag == CODEC_INFO_FLAG_NUMBER) {
			u32 data = priv->codec_info[i].val;

			seq_printf(seq, "%8d|", data);
		} else if (flag == CODEC_INFO_FLAG_STRING) {
			const char *name = (const char *)&priv->codec_info[i].val;

			seq_printf(seq, "%8s|", name);
		} else {
			seq_printf(seq, "%8s|", (const char *)"null");
		}
	}
	seq_puts(seq, "\n");
	up_read(&priv->rw_sem);

	return 0;
}

static int vepu_show_session_info(struct seq_file *seq, void *offset)
{
	struct mpp_session *session = NULL, *n;
	struct mpp_dev *mpp = seq->private;

	mutex_lock(&mpp->srv->session_lock);
	list_for_each_entry_safe(session, n,
				 &mpp->srv->session_list,
				 session_link) {
		if (session->device_type != MPP_DEVICE_VEPU2)
			continue;
		if (!session->priv)
			continue;
		if (mpp->dev_ops->dump_session)
			mpp->dev_ops->dump_session(session, seq);
	}
	mutex_unlock(&mpp->srv->session_lock);

	return 0;
}

static int vepu_procfs_init(struct mpp_dev *mpp)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);
	char name[32];

	if (!mpp->dev || !mpp->dev->of_node || !mpp->dev->of_node->name ||
	    !mpp->srv || !mpp->srv->procfs)
		return -EINVAL;

	snprintf(name, sizeof(name) - 1, "%s%d",
		 mpp->dev->of_node->name, mpp->core_id);

	enc->procfs = proc_mkdir(name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(enc->procfs)) {
		mpp_err("failed on open procfs\n");
		enc->procfs = NULL;
		return -EIO;
	}
	mpp_procfs_create_u32("aclk", 0644,
			      enc->procfs, &enc->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      enc->procfs, &mpp->session_max_buffers);
	/* entry to show per-session info */
	proc_create_single_data("sessions-info", 0444,
				enc->procfs, vepu_show_session_info, mpp);

	return 0;
}

static int vepu_procfs_ccu_init(struct mpp_dev *mpp)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);

	if (!enc->procfs)
		goto done;

	mpp_procfs_create_u32("disable_work", 0644,
			      enc->procfs, &enc->disable_work);
done:
	return 0;
}
#else
static inline int vepu_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int vepu_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}

static inline int vepu_procfs_ccu_init(struct mpp_dev *mpp)
{
	return 0;
}

static inline int vepu_dump_session(struct mpp_session *session, struct seq_file *seq)
{
	return 0;
}
#endif

static int vepu_init(struct mpp_dev *mpp)
{
	int ret;
	struct vepu_dev *enc = to_vepu_dev(mpp);

	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_VEPU2];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &enc->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &enc->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	/* Get normal max workload from dtsi */
	of_property_read_u32(mpp->dev->of_node,
			     "rockchip,default-max-load", &enc->default_max_load);
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&enc->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	/* Get reset control from dtsi */
	enc->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!enc->rst_a)
		mpp_err("No aclk reset resource defined\n");
	enc->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!enc->rst_h)
		mpp_err("No hclk reset resource defined\n");

	return 0;
}

static int vepu_px30_init(struct mpp_dev *mpp)
{
	vepu_init(mpp);
	return px30_workaround_combo_init(mpp);
}

static int vepu_clk_on(struct mpp_dev *mpp)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);

	mpp_clk_safe_enable(enc->aclk_info.clk);
	mpp_clk_safe_enable(enc->hclk_info.clk);

	return 0;
}

static int vepu_clk_off(struct mpp_dev *mpp)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);

	mpp_clk_safe_disable(enc->aclk_info.clk);
	mpp_clk_safe_disable(enc->hclk_info.clk);

	return 0;
}

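/*
 * Pick a clock mode for the task: sum the pixel count of this task and
 * everything already pending; if it exceeds the "rockchip,default-max-load"
 * value from the device tree, switch to the advanced (higher) clock rate.
 */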
static int vepu_get_freq(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	u32 task_cnt;
	u32 workload;
	struct mpp_task *loop = NULL, *n;
	struct vepu_dev *enc = to_vepu_dev(mpp);
	struct vepu_task *task = to_vepu_task(mpp_task);

	/* without a max load setting there is no advanced clock mode */
	if (!enc->default_max_load)
		return 0;

	task_cnt = 1;
	workload = task->pixels;
	/* calc workload in pending list */
	mutex_lock(&mpp->queue->pending_lock);
	list_for_each_entry_safe(loop, n,
				 &mpp->queue->pending_list,
				 queue_link) {
		struct vepu_task *loop_task = to_vepu_task(loop);

		task_cnt++;
		workload += loop_task->pixels;
	}
	mutex_unlock(&mpp->queue->pending_lock);

	if (workload > enc->default_max_load)
		task->clk_mode = CLK_MODE_ADVANCED;

	mpp_debug(DEBUG_TASK_INFO, "pending task %d, workload %d, clk_mode=%d\n",
		  task_cnt, workload, task->clk_mode);

	return 0;
}

static int vepu_set_freq(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);
	struct vepu_task *task = to_vepu_task(mpp_task);

	mpp_clk_set_rate(&enc->aclk_info, task->clk_mode);

	return 0;
}

static int vepu_reduce_freq(struct mpp_dev *mpp)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);

	mpp_clk_set_rate(&enc->aclk_info, CLK_MODE_REDUCE);

	return 0;
}

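/*
 * Block reset: assert the aclk/hclk resets with the power domain idled
 * (required so the iommu keeps working afterwards), then clear any
 * pending interrupt state.
 */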
static int vepu_reset(struct mpp_dev *mpp)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);

	if (enc->rst_a && enc->rst_h) {
		/* Don't skip this or iommu won't work after reset */
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(enc->rst_a);
		mpp_safe_reset(enc->rst_h);
		udelay(5);
		mpp_safe_unreset(enc->rst_a);
		mpp_safe_unreset(enc->rst_h);
		mpp_pmu_idle_request(mpp, false);
	}
	mpp_write(mpp, VEPU2_REG_INT, VEPU2_INT_CLEAR);

	return 0;
}

static struct mpp_hw_ops vepu_v2_hw_ops = {
	.init = vepu_init,
	.clk_on = vepu_clk_on,
	.clk_off = vepu_clk_off,
	.get_freq = vepu_get_freq,
	.set_freq = vepu_set_freq,
	.reduce_freq = vepu_reduce_freq,
	.reset = vepu_reset,
};

static struct mpp_hw_ops vepu_px30_hw_ops = {
	.init = vepu_px30_init,
	.clk_on = vepu_clk_on,
	.clk_off = vepu_clk_off,
	.set_freq = vepu_set_freq,
	.reduce_freq = vepu_reduce_freq,
	.reset = vepu_reset,
	.set_grf = px30_workaround_combo_switch_grf,
};

static struct mpp_dev_ops vepu_v2_dev_ops = {
	.alloc_task = vepu_alloc_task,
	.run = vepu_run,
	.irq = vepu_irq,
	.isr = vepu_isr,
	.finish = vepu_finish,
	.result = vepu_result,
	.free_task = vepu_free_task,
	.ioctl = vepu_control,
	.init_session = vepu_init_session,
	.free_session = vepu_free_session,
	.dump_session = vepu_dump_session,
};

static struct mpp_dev_ops vepu_ccu_dev_ops = {
	.alloc_task = vepu_ccu_alloc_task,
	.run = vepu_run,
	.irq = vepu_irq,
	.isr = vepu_isr,
	.finish = vepu_finish,
	.result = vepu_result,
	.free_task = vepu_free_task,
	.ioctl = vepu_control,
	.init_session = vepu_init_session,
	.free_session = vepu_free_session,
	.dump_session = vepu_dump_session,
};

static const struct mpp_dev_var vepu_v2_data = {
	.device_type = MPP_DEVICE_VEPU2,
	.hw_info = &vepu_v2_hw_info,
	.trans_info = trans_rk_vepu2,
	.hw_ops = &vepu_v2_hw_ops,
	.dev_ops = &vepu_v2_dev_ops,
};

static const struct mpp_dev_var vepu_px30_data = {
	.device_type = MPP_DEVICE_VEPU2,
	.hw_info = &vepu_v2_hw_info,
	.trans_info = trans_rk_vepu2,
	.hw_ops = &vepu_px30_hw_ops,
	.dev_ops = &vepu_v2_dev_ops,
};

static const struct mpp_dev_var vepu_ccu_data = {
	.device_type = MPP_DEVICE_VEPU2,
	.hw_info = &vepu_v2_hw_info,
	.trans_info = trans_rk_vepu2,
	.hw_ops = &vepu_v2_hw_ops,
	.dev_ops = &vepu_ccu_dev_ops,
};

static const struct of_device_id mpp_vepu2_dt_match[] = {
	{
		.compatible = "rockchip,vpu-encoder-v2",
		.data = &vepu_v2_data,
	},
#ifdef CONFIG_CPU_PX30
	{
		.compatible = "rockchip,vpu-encoder-px30",
		.data = &vepu_px30_data,
	},
#endif
#ifdef CONFIG_CPU_RK3588
	{
		.compatible = "rockchip,vpu-encoder-v2-core",
		.data = &vepu_ccu_data,
	},
	{
		.compatible = "rockchip,vpu-encoder-v2-ccu",
	},
#endif
	{},
};

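/*
 * Probe for the ccu (central control unit) node: allocate the shared
 * bookkeeping structure that the encoder cores attach to.
 */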
static int vepu_ccu_probe(struct platform_device *pdev)
{
	struct vepu_ccu *ccu;
	struct device *dev = &pdev->dev;

	ccu = devm_kzalloc(dev, sizeof(*ccu), GFP_KERNEL);
	if (!ccu)
		return -ENOMEM;

	platform_set_drvdata(pdev, ccu);
	mutex_init(&ccu->lock);
	INIT_LIST_HEAD(&ccu->core_list);

	return 0;
}

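/*
 * Attach a core to its ccu (looked up via the "rockchip,ccu" phandle).
 * The first core to attach becomes the main core; every later core
 * reuses the main core's iommu domain so all cores share one mapping.
 */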
static int vepu_attach_ccu(struct device *dev, struct vepu_dev *enc)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct vepu_ccu *ccu;

	np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0);
	if (!np || !of_device_is_available(np))
		return -ENODEV;

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return -ENODEV;

	ccu = platform_get_drvdata(pdev);
	if (!ccu)
		return -ENOMEM;

	INIT_LIST_HEAD(&enc->core_link);
	mutex_lock(&ccu->lock);
	ccu->core_num++;
	list_add_tail(&enc->core_link, &ccu->core_list);
	mutex_unlock(&ccu->lock);

	/* attach the ccu domain to the current core */
	if (!ccu->main_core) {
		/*
		 * The first attached device becomes the main core, and the
		 * main core's iommu domain serves as the shared ccu domain.
		 */
		ccu->main_core = &enc->mpp;
	} else {
		struct mpp_iommu_info *ccu_info, *cur_info;

		/* share the ccu domain with the current device */
		ccu_info = ccu->main_core->iommu_info;
		cur_info = enc->mpp.iommu_info;

		cur_info->domain = ccu_info->domain;
		mpp_iommu_attach(cur_info);
	}
	enc->ccu = ccu;

	dev_info(dev, "attach ccu success\n");
	return 0;
}

static int vepu_core_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vepu_dev *enc = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;
	int ret = 0;

	enc = devm_kzalloc(dev, sizeof(struct vepu_dev), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;

	mpp = &enc->mpp;
	platform_set_drvdata(pdev, enc);

	if (pdev->dev.of_node) {
		match = of_match_node(mpp_vepu2_dt_match, pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;

		mpp->core_id = of_alias_get_id(pdev->dev.of_node, "jpege");
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret) {
		dev_err(dev, "probe sub driver failed\n");
		return -EINVAL;
	}
	/* attach the current device to the ccu */
	ret = vepu_attach_ccu(dev, enc);
	if (ret)
		return ret;

	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "failed to register interrupt handler\n");
		return -EINVAL;
	}

	mpp->session_max_buffers = VEPU2_SESSION_MAX_BUFFERS;
	vepu_procfs_init(mpp);
	vepu_procfs_ccu_init(mpp);
	/* if this is the main core, register the device with the mpp service */
	if (mpp == enc->ccu->main_core)
		mpp_dev_register_srv(mpp, mpp->srv);

	return 0;
}

static int vepu_probe_default(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vepu_dev *enc = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;
	int ret = 0;

	enc = devm_kzalloc(dev, sizeof(struct vepu_dev), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;

	mpp = &enc->mpp;
	platform_set_drvdata(pdev, enc);

	if (pdev->dev.of_node) {
		match = of_match_node(mpp_vepu2_dt_match, pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret) {
		dev_err(dev, "probe sub driver failed\n");
		return -EINVAL;
	}

	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "failed to register interrupt handler\n");
		return -EINVAL;
	}

	mpp->session_max_buffers = VEPU2_SESSION_MAX_BUFFERS;
	vepu_procfs_init(mpp);
	/* register the current device with the mpp service */
	mpp_dev_register_srv(mpp, mpp->srv);

	return 0;
}

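/*
 * Top-level probe: dispatch on the device-tree node name, so "ccu"
 * nodes, multi-core "core" nodes, and plain single-instance encoders
 * take the matching probe path.
 */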
static int vepu_probe(struct platform_device *pdev)
{
	int ret;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	dev_info(dev, "probing start\n");

	if (strstr(np->name, "ccu"))
		ret = vepu_ccu_probe(pdev);
	else if (strstr(np->name, "core"))
		ret = vepu_core_probe(pdev);
	else
		ret = vepu_probe_default(pdev);

	dev_info(dev, "probing finish\n");

	return ret;
}

static int vepu_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	if (strstr(np->name, "ccu")) {
		dev_info(dev, "remove ccu device\n");
	} else if (strstr(np->name, "core")) {
		struct vepu_dev *enc = platform_get_drvdata(pdev);

		dev_info(dev, "remove core\n");
		if (enc->ccu) {
			mutex_lock(&enc->ccu->lock);
			list_del_init(&enc->core_link);
			enc->ccu->core_num--;
			mutex_unlock(&enc->ccu->lock);
		}
		mpp_dev_remove(&enc->mpp);
		vepu_procfs_remove(&enc->mpp);
	} else {
		struct vepu_dev *enc = platform_get_drvdata(pdev);

		dev_info(dev, "remove device\n");
		mpp_dev_remove(&enc->mpp);
		vepu_procfs_remove(&enc->mpp);
	}

	return 0;
}

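/*
 * Shutdown: flag the service so no new tasks are accepted, then poll
 * until the device's task count drains to zero (or time out).
 */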
static void vepu_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	if (!strstr(dev_name(dev), "ccu")) {
		int ret;
		int val;
		struct vepu_dev *enc = platform_get_drvdata(pdev);
		struct mpp_dev *mpp = &enc->mpp;

		dev_info(dev, "shutdown device\n");

		if (mpp->srv)
			atomic_inc(&mpp->srv->shutdown_request);

		ret = readx_poll_timeout(atomic_read,
					 &mpp->task_count,
					 val, val == 0, 20000, 200000);
		if (ret == -ETIMEDOUT)
			dev_err(dev, "timed out waiting for running tasks to finish\n");
	}
	dev_info(dev, "shutdown success\n");
}

struct platform_driver rockchip_vepu2_driver = {
	.probe = vepu_probe,
	.remove = vepu_remove,
	.shutdown = vepu_shutdown,
	.driver = {
		.name = VEPU2_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_vepu2_dt_match),
	},
};
EXPORT_SYMBOL(rockchip_vepu2_driver);